Merge branch 'drm-ttm-glisse' of ../drm-radeon-next into drm-core-next
Merge topic branch containing Jerome's TTM changes; it contains one change from Konrad to the swiotlb export.

* 'drm-ttm-glisse' of ../drm-radeon-next:
  drm/ttm: callback move_notify any time bo placement change v4
  drm/ttm: simplify memory accounting for ttm user v2
  drm/ttm: isolate dma data from ttm_tt V4
  drm/nouveau: enable the ttm dma pool when swiotlb is active V3
  drm/radeon/kms: enable the ttm dma pool if swiotlb is on V4
  drm/ttm: provide dma aware ttm page pool code V9
  drm/ttm: introduce callback for ttm_tt populate & unpopulate V4
  drm/ttm: merge ttm_backend and ttm_tt V5
  drm/ttm: page allocation use page array instead of list
  drm/ttm: test for dma_address array allocation failure
  drm/ttm: use ttm put pages function to properly restore cache attribute
  drm/ttm: remove unused backend flags field
  drm/ttm: remove split btw highmen and lowmem page
  drm/ttm: remove userspace backed ttm object support
  swiotlb: Expose swiotlb_nr_tlb function to modules
commit 32faa34dc5
27 changed files with 1967 additions and 974 deletions
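Editor's note, not part of the commit: the series folds the old struct ttm_backend into struct ttm_tt and moves page allocation out of the TTM core into two new per-driver hooks, ttm_tt_populate and ttm_tt_unpopulate, with a DMA-aware page pool used when swiotlb is active. A minimal sketch of the shape such a hook takes after this merge is given below; the example_* names are hypothetical, while the TTM and swiotlb calls mirror the nouveau and radeon hunks that follow.

#include <linux/swiotlb.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

/*
 * Editor's sketch of a driver populate hook after the ttm_backend/ttm_tt
 * merge.  example_get_device() is a hypothetical helper returning the
 * struct device behind the ttm_bo_device.
 */
static int example_ttm_tt_populate(struct ttm_tt *ttm)
{
	if (ttm->state != tt_unpopulated)
		return 0;

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		/* Bounce buffering is possible: allocate from the coherent
		 * DMA pool.  The cast is valid because drivers embed
		 * struct ttm_dma_tt as their first member. */
		return ttm_dma_populate((struct ttm_dma_tt *)ttm,
					example_get_device(ttm->bdev));
	}
#endif
	/* Otherwise use the plain page pool; the driver maps pages itself. */
	return ttm_pool_populate(ttm);
}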
@@ -28,6 +28,7 @@
 */

#include "drmP.h"
#include "ttm/ttm_page_alloc.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
@@ -92,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);

@@ -114,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, size,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
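The acc_size argument added to ttm_bo_init() above is the caller's estimate of the kernel memory TTM should account for the object. A rough worked example of what ttm_bo_dma_acc_size() (defined in the ttm_bo.c hunk later in this diff) adds up — an editor's sketch assuming 4 KiB pages and a 64-bit build; exact values vary by architecture and driver structure:

/*
 * Editor's sketch: ttm_bo_dma_acc_size(bdev, 1 << 20, sizeof(struct nouveau_bo))
 * for a 1 MiB buffer object, 4 KiB pages, 8-byte pointers:
 *
 *   npages                               = 1 MiB / 4 KiB = 256
 *   ttm_round_pot(struct_size)           ~ driver BO struct, rounded to a power of two
 *   PAGE_ALIGN(256 * sizeof(void *))     = 4096  (struct page *pages[] array)
 *   PAGE_ALIGN(256 * sizeof(dma_addr_t)) = 4096  (dma_address[] array)
 *   ttm_round_pot(sizeof(struct ttm_dma_tt))
 *
 * i.e. roughly 8 KiB of per-page arrays plus the rounded structure sizes.
 */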
@@ -343,8 +348,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
	*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
		      unsigned long size, uint32_t page_flags,
		      struct page *dummy_read_page)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

@@ -352,11 +359,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
		return ttm_agp_tt_create(bdev, dev->agp->bridge,
					 size, page_flags, dummy_read_page);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
						dummy_read_page);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
@@ -806,10 +815,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem->mem_type == TTM_PL_VRAM) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem->mem_type == TTM_PL_TT &&
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->spg_shift) {
			nouveau_vm_map_sg(vma, 0, new_mem->
					  num_pages << PAGE_SHIFT,
@@ -1044,8 +1053,81 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
	nouveau_fence_unref(&old_fence);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;
	int r;

	if (ttm->state != tt_unpopulated)
		return 0;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			while (--i) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
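Editor's note: with the ttm_backend gone, the per-page bus addresses filled in by the populate hooks above live in struct ttm_dma_tt's dma_address[] array (sized by ttm_dma_tt_init()), and the GART bind callbacks in the nouveau_sgdma.c and radeon_ttm.c hunks below read them from there instead of from a driver-private list. A hedged sketch of that pattern, using hypothetical example_* names:

/* Editor's sketch: a bind hook consuming the dma_address[] array that the
 * populate step filled in.  example_write_gart_entry() is hypothetical. */
struct example_be {
	struct ttm_dma_tt ttm;		/* must stay first so (struct ttm_tt *) casts back */
	/* driver-private fields ... */
};

static int example_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct example_be *be = (struct example_be *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; i++)
		example_write_gart_entry(mem->start + i,	  /* GPU page index */
					 be->ttm.dma_address[i]); /* bus address */
	return 0;
}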
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
	{ "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)

@@ -1000,7 +1000,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
					   uint32_t offset);
extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
					       unsigned long size,
					       uint32_t page_flags,
					       struct page *dummy_read_page);

/* nouveau_debugfs.c */
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;
	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		/* Reset to default value. */
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
	}


	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
@@ -8,88 +8,30 @@
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)

struct nouveau_sgdma_be {
	struct ttm_backend backend;
	/* this has to be the first field so populate/unpopulated in
	 * nouve_bo.c works properly, otherwise have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;
	bool unmap_pages;

	u64 offset;
	bool bound;
};

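Editor's note on the layout comment above: keeping struct ttm_dma_tt as the first member is what lets the populate/unpopulate hooks in nouveau_bo.c cast the struct ttm_tt * they receive straight to the DMA-aware wrapper (and to the driver structure), because all three share the same address. A minimal illustration with hypothetical names:

/* Editor's sketch: the first-member embedding the comment above relies on. */
struct example_sgdma_be {
	struct ttm_dma_tt ttm;	/* offset 0: &be->ttm.ttm == (struct ttm_tt *)be */
	struct drm_device *dev;
	u64 offset;
};

static void example_casts(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *dma = (struct ttm_dma_tt *)ttm;	       /* wrapper view */
	struct example_sgdma_be *be = (struct example_sgdma_be *)ttm; /* driver view */

	(void)dma;
	(void)be;
}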
static int
|
||||
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
|
||||
struct page **pages, struct page *dummy_read_page,
|
||||
dma_addr_t *dma_addrs)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct drm_device *dev = nvbe->dev;
|
||||
int i;
|
||||
|
||||
NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
|
||||
|
||||
nvbe->pages = dma_addrs;
|
||||
nvbe->nr_pages = num_pages;
|
||||
nvbe->unmap_pages = true;
|
||||
|
||||
/* this code path isn't called and is incorrect anyways */
|
||||
if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
|
||||
nvbe->unmap_pages = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_pages; i++) {
|
||||
nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
|
||||
nvbe->nr_pages = --i;
|
||||
be->func->clear(be);
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
nouveau_sgdma_clear(struct ttm_backend *be)
|
||||
nouveau_sgdma_destroy(struct ttm_tt *ttm)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct drm_device *dev = nvbe->dev;
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
|
||||
if (nvbe->bound)
|
||||
be->func->unbind(be);
|
||||
|
||||
if (nvbe->unmap_pages) {
|
||||
while (nvbe->nr_pages--) {
|
||||
pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
nouveau_sgdma_destroy(struct ttm_backend *be)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
|
||||
if (be) {
|
||||
if (ttm) {
|
||||
NV_DEBUG(nvbe->dev, "\n");
|
||||
|
||||
if (nvbe) {
|
||||
if (nvbe->pages)
|
||||
be->func->clear(be);
|
||||
kfree(nvbe);
|
||||
}
|
||||
ttm_dma_tt_fini(&nvbe->ttm);
|
||||
kfree(nvbe);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
|
||||
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
struct drm_device *dev = nvbe->dev;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
|
||||
|
@ -99,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
|
|||
|
||||
nvbe->offset = mem->start << PAGE_SHIFT;
|
||||
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
|
||||
for (i = 0; i < nvbe->nr_pages; i++) {
|
||||
dma_addr_t dma_offset = nvbe->pages[i];
|
||||
for (i = 0; i < ttm->num_pages; i++) {
|
||||
dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
|
||||
uint32_t offset_l = lower_32_bits(dma_offset);
|
||||
|
||||
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
|
||||
|
@ -109,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
|
|||
}
|
||||
}
|
||||
|
||||
nvbe->bound = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv04_sgdma_unbind(struct ttm_backend *be)
|
||||
nv04_sgdma_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
struct drm_device *dev = nvbe->dev;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
|
||||
|
@ -124,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
|
|||
|
||||
NV_DEBUG(dev, "\n");
|
||||
|
||||
if (!nvbe->bound)
|
||||
if (ttm->state != tt_bound)
|
||||
return 0;
|
||||
|
||||
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
|
||||
for (i = 0; i < nvbe->nr_pages; i++) {
|
||||
for (i = 0; i < ttm->num_pages; i++) {
|
||||
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
|
||||
nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
|
||||
}
|
||||
|
||||
nvbe->bound = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ttm_backend_func nv04_sgdma_backend = {
|
||||
.populate = nouveau_sgdma_populate,
|
||||
.clear = nouveau_sgdma_clear,
|
||||
.bind = nv04_sgdma_bind,
|
||||
.unbind = nv04_sgdma_unbind,
|
||||
.destroy = nouveau_sgdma_destroy
|
||||
|
@ -158,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
|
|||
}
|
||||
|
||||
static int
|
||||
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
|
||||
nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
|
||||
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
|
||||
dma_addr_t *list = nvbe->pages;
|
||||
dma_addr_t *list = nvbe->ttm.dma_address;
|
||||
u32 pte = mem->start << 2;
|
||||
u32 cnt = nvbe->nr_pages;
|
||||
u32 cnt = ttm->num_pages;
|
||||
|
||||
nvbe->offset = mem->start << PAGE_SHIFT;
|
||||
|
||||
|
@ -175,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
|
|||
}
|
||||
|
||||
nv41_sgdma_flush(nvbe);
|
||||
nvbe->bound = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv41_sgdma_unbind(struct ttm_backend *be)
|
||||
nv41_sgdma_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
|
||||
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
|
||||
u32 pte = (nvbe->offset >> 12) << 2;
|
||||
u32 cnt = nvbe->nr_pages;
|
||||
u32 cnt = ttm->num_pages;
|
||||
|
||||
while (cnt--) {
|
||||
nv_wo32(pgt, pte, 0x00000000);
|
||||
|
@ -194,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
|
|||
}
|
||||
|
||||
nv41_sgdma_flush(nvbe);
|
||||
nvbe->bound = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ttm_backend_func nv41_sgdma_backend = {
|
||||
.populate = nouveau_sgdma_populate,
|
||||
.clear = nouveau_sgdma_clear,
|
||||
.bind = nv41_sgdma_bind,
|
||||
.unbind = nv41_sgdma_unbind,
|
||||
.destroy = nouveau_sgdma_destroy
|
||||
};
|
||||
|
||||
static void
|
||||
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
|
||||
nv44_sgdma_flush(struct ttm_tt *ttm)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
struct drm_device *dev = nvbe->dev;
|
||||
|
||||
nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
|
||||
nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
|
||||
nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
|
||||
if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
|
||||
NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
|
||||
|
@ -270,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
|
|||
}
|
||||
|
||||
static int
|
||||
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
|
||||
nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
|
||||
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
|
||||
dma_addr_t *list = nvbe->pages;
|
||||
dma_addr_t *list = nvbe->ttm.dma_address;
|
||||
u32 pte = mem->start << 2, tmp[4];
|
||||
u32 cnt = nvbe->nr_pages;
|
||||
u32 cnt = ttm->num_pages;
|
||||
int i;
|
||||
|
||||
nvbe->offset = mem->start << PAGE_SHIFT;
|
||||
|
@ -305,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
|
|||
if (cnt)
|
||||
nv44_sgdma_fill(pgt, list, pte, cnt);
|
||||
|
||||
nv44_sgdma_flush(nvbe);
|
||||
nvbe->bound = true;
|
||||
nv44_sgdma_flush(ttm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv44_sgdma_unbind(struct ttm_backend *be)
|
||||
nv44_sgdma_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
|
||||
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
|
||||
u32 pte = (nvbe->offset >> 12) << 2;
|
||||
u32 cnt = nvbe->nr_pages;
|
||||
u32 cnt = ttm->num_pages;
|
||||
|
||||
if (pte & 0x0000000c) {
|
||||
u32 max = 4 - ((pte >> 2) & 0x3);
|
||||
|
@ -339,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
|
|||
if (cnt)
|
||||
nv44_sgdma_fill(pgt, NULL, pte, cnt);
|
||||
|
||||
nv44_sgdma_flush(nvbe);
|
||||
nvbe->bound = false;
|
||||
nv44_sgdma_flush(ttm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ttm_backend_func nv44_sgdma_backend = {
|
||||
.populate = nouveau_sgdma_populate,
|
||||
.clear = nouveau_sgdma_clear,
|
||||
.bind = nv44_sgdma_bind,
|
||||
.unbind = nv44_sgdma_unbind,
|
||||
.destroy = nouveau_sgdma_destroy
|
||||
};
|
||||
|
||||
static int
|
||||
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
|
||||
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
|
||||
struct nouveau_mem *node = mem->mm_node;
|
||||
|
||||
/* noop: bound in move_notify() */
|
||||
node->pages = nvbe->pages;
|
||||
nvbe->pages = (dma_addr_t *)node;
|
||||
nvbe->bound = true;
|
||||
node->pages = nvbe->ttm.dma_address;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_sgdma_unbind(struct ttm_backend *be)
|
||||
nv50_sgdma_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
|
||||
struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
|
||||
/* noop: unbound in move_notify() */
|
||||
nvbe->pages = node->pages;
|
||||
node->pages = NULL;
|
||||
nvbe->bound = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ttm_backend_func nv50_sgdma_backend = {
|
||||
.populate = nouveau_sgdma_populate,
|
||||
.clear = nouveau_sgdma_clear,
|
||||
.bind = nv50_sgdma_bind,
|
||||
.unbind = nv50_sgdma_unbind,
|
||||
.destroy = nouveau_sgdma_destroy
|
||||
};
|
||||
|
||||
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);

@@ -395,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
		return NULL;

	nvbe->dev = dev;
	nvbe->ttm.ttm.func = dev_priv->gart_info.func;

	nvbe->backend.func = dev_priv->gart_info.func;
	return &nvbe->backend;
	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(nvbe);
		return NULL;
	}
	return &nvbe->ttm.ttm;
}

int
@@ -320,7 +320,6 @@ struct radeon_gart {
	unsigned			table_size;
	struct page			**pages;
	dma_addr_t			*pages_addr;
	bool				*ttm_alloced;
	bool				ready;
};

@@ -765,8 +765,14 @@ int radeon_device_init(struct radeon_device *rdev,
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
|
|||
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
|
||||
for (i = 0; i < pages; i++, p++) {
|
||||
if (rdev->gart.pages[p]) {
|
||||
if (!rdev->gart.ttm_alloced[p])
|
||||
pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
rdev->gart.pages[p] = NULL;
|
||||
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
|
||||
page_base = rdev->gart.pages_addr[p];
|
||||
|
@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
|
|||
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
|
||||
|
||||
for (i = 0; i < pages; i++, p++) {
|
||||
/* we reverted the patch using dma_addr in TTM for now but this
|
||||
* code stops building on alpha so just comment it out for now */
|
||||
if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
|
||||
rdev->gart.ttm_alloced[p] = true;
|
||||
rdev->gart.pages_addr[p] = dma_addr[i];
|
||||
} else {
|
||||
/* we need to support large memory configurations */
|
||||
/* assume that unbind have already been call on the range */
|
||||
rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
|
||||
0, PAGE_SIZE,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
|
||||
/* FIXME: failed to map page (return -ENOMEM?) */
|
||||
radeon_gart_unbind(rdev, offset, pages);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
rdev->gart.pages_addr[p] = dma_addr[i];
|
||||
rdev->gart.pages[p] = pagelist[i];
|
||||
if (rdev->gart.ptr) {
|
||||
page_base = rdev->gart.pages_addr[p];
|
||||
|
@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
|
|||
radeon_gart_fini(rdev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
|
||||
rdev->gart.num_cpu_pages, GFP_KERNEL);
|
||||
if (rdev->gart.ttm_alloced == NULL) {
|
||||
radeon_gart_fini(rdev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* set GART entry to point to the dummy page by default */
|
||||
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
|
||||
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
|
||||
|
@ -296,10 +271,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
|
|||
rdev->gart.ready = false;
|
||||
kfree(rdev->gart.pages);
|
||||
kfree(rdev->gart.pages_addr);
|
||||
kfree(rdev->gart.ttm_alloced);
|
||||
rdev->gart.pages = NULL;
|
||||
rdev->gart.pages_addr = NULL;
|
||||
rdev->gart.ttm_alloced = NULL;
|
||||
|
||||
radeon_dummy_page_fini(rdev);
|
||||
}
|
||||
|
|
|
@@ -95,6 +95,7 @@ int radeon_bo_create(struct radeon_device *rdev,
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

@@ -117,6 +118,9 @@ int radeon_bo_create(struct radeon_device *rdev,
		return -ENOMEM;
	}

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)

@@ -134,8 +138,8 @@ int radeon_bo_create(struct radeon_device *rdev,
	/* Kernel allocation are uninterruptible */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL, size,
			&radeon_ttm_bo_destroy);
			&bo->placement, page_align, 0, !kernel, NULL,
			acc_size, &radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
|
|||
}
|
||||
}
|
||||
|
||||
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
|
||||
|
||||
static struct ttm_backend*
|
||||
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
|
||||
{
|
||||
struct radeon_device *rdev;
|
||||
|
||||
rdev = radeon_get_rdev(bdev);
|
||||
#if __OS_HAS_AGP
|
||||
if (rdev->flags & RADEON_IS_AGP) {
|
||||
return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
return radeon_ttm_backend_create(rdev);
|
||||
}
|
||||
}
|
||||
|
||||
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
|
||||
{
|
||||
return 0;
|
||||
|
@ -515,8 +497,155 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
|
|||
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
|
||||
}
|
||||
|
||||
/*
|
||||
* TTM backend functions.
|
||||
*/
|
||||
struct radeon_ttm_tt {
|
||||
struct ttm_dma_tt ttm;
|
||||
struct radeon_device *rdev;
|
||||
u64 offset;
|
||||
};
|
||||
|
||||
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
|
||||
struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
struct radeon_ttm_tt *gtt = (void*)ttm;
|
||||
int r;
|
||||
|
||||
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
|
||||
if (!ttm->num_pages) {
|
||||
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
|
||||
ttm->num_pages, bo_mem, ttm);
|
||||
}
|
||||
r = radeon_gart_bind(gtt->rdev, gtt->offset,
|
||||
ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
|
||||
if (r) {
|
||||
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
|
||||
ttm->num_pages, (unsigned)gtt->offset);
|
||||
return r;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
struct radeon_ttm_tt *gtt = (void *)ttm;
|
||||
|
||||
radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
|
||||
{
|
||||
struct radeon_ttm_tt *gtt = (void *)ttm;
|
||||
|
||||
ttm_dma_tt_fini(&gtt->ttm);
|
||||
kfree(gtt);
|
||||
}
|
||||
|
||||
static struct ttm_backend_func radeon_backend_func = {
|
||||
.bind = &radeon_ttm_backend_bind,
|
||||
.unbind = &radeon_ttm_backend_unbind,
|
||||
.destroy = &radeon_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags,
|
||||
struct page *dummy_read_page)
|
||||
{
|
||||
struct radeon_device *rdev;
|
||||
struct radeon_ttm_tt *gtt;
|
||||
|
||||
rdev = radeon_get_rdev(bdev);
|
||||
#if __OS_HAS_AGP
|
||||
if (rdev->flags & RADEON_IS_AGP) {
|
||||
return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
|
||||
size, page_flags, dummy_read_page);
|
||||
}
|
||||
#endif
|
||||
|
||||
gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
|
||||
if (gtt == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
gtt->ttm.ttm.func = &radeon_backend_func;
|
||||
gtt->rdev = rdev;
|
||||
if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
|
||||
kfree(gtt);
|
||||
return NULL;
|
||||
}
|
||||
return &gtt->ttm.ttm;
|
||||
}
|
||||
|
||||
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
|
||||
{
|
||||
struct radeon_device *rdev;
|
||||
struct radeon_ttm_tt *gtt = (void *)ttm;
|
||||
unsigned i;
|
||||
int r;
|
||||
|
||||
if (ttm->state != tt_unpopulated)
|
||||
return 0;
|
||||
|
||||
rdev = radeon_get_rdev(ttm->bdev);
|
||||
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
if (swiotlb_nr_tbl()) {
|
||||
return ttm_dma_populate(&gtt->ttm, rdev->dev);
|
||||
}
|
||||
#endif
|
||||
|
||||
r = ttm_pool_populate(ttm);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
|
||||
for (i = 0; i < ttm->num_pages; i++) {
|
||||
gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
|
||||
0, PAGE_SIZE,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
|
||||
while (--i) {
|
||||
pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
gtt->ttm.dma_address[i] = 0;
|
||||
}
|
||||
ttm_pool_unpopulate(ttm);
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
|
||||
{
|
||||
struct radeon_device *rdev;
|
||||
struct radeon_ttm_tt *gtt = (void *)ttm;
|
||||
unsigned i;
|
||||
|
||||
rdev = radeon_get_rdev(ttm->bdev);
|
||||
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
if (swiotlb_nr_tbl()) {
|
||||
ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
for (i = 0; i < ttm->num_pages; i++) {
|
||||
if (gtt->ttm.dma_address[i]) {
|
||||
pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
}
|
||||
}
|
||||
|
||||
ttm_pool_unpopulate(ttm);
|
||||
}
|
||||
|
||||
static struct ttm_bo_driver radeon_bo_driver = {
|
||||
.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
|
||||
.ttm_tt_create = &radeon_ttm_tt_create,
|
||||
.ttm_tt_populate = &radeon_ttm_tt_populate,
|
||||
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
|
||||
.invalidate_caches = &radeon_invalidate_caches,
|
||||
.init_mem_type = &radeon_init_mem_type,
|
||||
.evict_flags = &radeon_evict_flags,
|
||||
|
@ -680,124 +809,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* TTM backend functions.
|
||||
*/
|
||||
struct radeon_ttm_backend {
|
||||
struct ttm_backend backend;
|
||||
struct radeon_device *rdev;
|
||||
unsigned long num_pages;
|
||||
struct page **pages;
|
||||
struct page *dummy_read_page;
|
||||
dma_addr_t *dma_addrs;
|
||||
bool populated;
|
||||
bool bound;
|
||||
unsigned offset;
|
||||
};
|
||||
|
||||
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
|
||||
unsigned long num_pages,
|
||||
struct page **pages,
|
||||
struct page *dummy_read_page,
|
||||
dma_addr_t *dma_addrs)
|
||||
{
|
||||
struct radeon_ttm_backend *gtt;
|
||||
|
||||
gtt = container_of(backend, struct radeon_ttm_backend, backend);
|
||||
gtt->pages = pages;
|
||||
gtt->dma_addrs = dma_addrs;
|
||||
gtt->num_pages = num_pages;
|
||||
gtt->dummy_read_page = dummy_read_page;
|
||||
gtt->populated = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void radeon_ttm_backend_clear(struct ttm_backend *backend)
|
||||
{
|
||||
struct radeon_ttm_backend *gtt;
|
||||
|
||||
gtt = container_of(backend, struct radeon_ttm_backend, backend);
|
||||
gtt->pages = NULL;
|
||||
gtt->dma_addrs = NULL;
|
||||
gtt->num_pages = 0;
|
||||
gtt->dummy_read_page = NULL;
|
||||
gtt->populated = false;
|
||||
gtt->bound = false;
|
||||
}
|
||||
|
||||
|
||||
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
|
||||
struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
struct radeon_ttm_backend *gtt;
|
||||
int r;
|
||||
|
||||
gtt = container_of(backend, struct radeon_ttm_backend, backend);
|
||||
gtt->offset = bo_mem->start << PAGE_SHIFT;
|
||||
if (!gtt->num_pages) {
|
||||
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
|
||||
gtt->num_pages, bo_mem, backend);
|
||||
}
|
||||
r = radeon_gart_bind(gtt->rdev, gtt->offset,
|
||||
gtt->num_pages, gtt->pages, gtt->dma_addrs);
|
||||
if (r) {
|
||||
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
|
||||
gtt->num_pages, gtt->offset);
|
||||
return r;
|
||||
}
|
||||
gtt->bound = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
|
||||
{
|
||||
struct radeon_ttm_backend *gtt;
|
||||
|
||||
gtt = container_of(backend, struct radeon_ttm_backend, backend);
|
||||
radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
|
||||
gtt->bound = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
|
||||
{
|
||||
struct radeon_ttm_backend *gtt;
|
||||
|
||||
gtt = container_of(backend, struct radeon_ttm_backend, backend);
|
||||
if (gtt->bound) {
|
||||
radeon_ttm_backend_unbind(backend);
|
||||
}
|
||||
kfree(gtt);
|
||||
}
|
||||
|
||||
static struct ttm_backend_func radeon_backend_func = {
|
||||
.populate = &radeon_ttm_backend_populate,
|
||||
.clear = &radeon_ttm_backend_clear,
|
||||
.bind = &radeon_ttm_backend_bind,
|
||||
.unbind = &radeon_ttm_backend_unbind,
|
||||
.destroy = &radeon_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_ttm_backend *gtt;
|
||||
|
||||
gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
|
||||
if (gtt == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
gtt->backend.bdev = &rdev->mman.bdev;
|
||||
gtt->backend.flags = 0;
|
||||
gtt->backend.func = &radeon_backend_func;
|
||||
gtt->rdev = rdev;
|
||||
gtt->pages = NULL;
|
||||
gtt->num_pages = 0;
|
||||
gtt->dummy_read_page = NULL;
|
||||
gtt->populated = false;
|
||||
gtt->bound = false;
|
||||
return &gtt->backend;
|
||||
}
|
||||
|
||||
#define RADEON_DEBUGFS_MEM_TYPES 2
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
|
@ -820,8 +831,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
|
|||
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
|
||||
{
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
|
||||
static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
|
||||
static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
|
||||
static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
|
||||
|
@ -843,8 +854,17 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
|
|||
radeon_mem_types_list[i].name = radeon_mem_types_names[i];
|
||||
radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
|
||||
radeon_mem_types_list[i].driver_features = 0;
|
||||
radeon_mem_types_list[i].data = NULL;
|
||||
return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
|
||||
radeon_mem_types_list[i++].data = NULL;
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
if (swiotlb_nr_tbl()) {
|
||||
sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
|
||||
radeon_mem_types_list[i].name = radeon_mem_types_names[i];
|
||||
radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
|
||||
radeon_mem_types_list[i].driver_features = 0;
|
||||
radeon_mem_types_list[i++].data = NULL;
|
||||
}
|
||||
#endif
|
||||
return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
|
||||
|
||||
#endif
|
||||
return 0;
|
||||
|
|
|
@@ -7,4 +7,8 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
	ttm_bo_manager.o

ifeq ($(CONFIG_SWIOTLB),y)
ttm-y += ttm_page_alloc_dma.o
endif

obj-$(CONFIG_DRM_TTM) += ttm.o
@ -40,45 +40,33 @@
|
|||
#include <asm/agp.h>
|
||||
|
||||
struct ttm_agp_backend {
|
||||
struct ttm_backend backend;
|
||||
struct ttm_tt ttm;
|
||||
struct agp_memory *mem;
|
||||
struct agp_bridge_data *bridge;
|
||||
};
|
||||
|
||||
static int ttm_agp_populate(struct ttm_backend *backend,
|
||||
unsigned long num_pages, struct page **pages,
|
||||
struct page *dummy_read_page,
|
||||
dma_addr_t *dma_addrs)
|
||||
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be =
|
||||
container_of(backend, struct ttm_agp_backend, backend);
|
||||
struct page **cur_page, **last_page = pages + num_pages;
|
||||
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
|
||||
struct drm_mm_node *node = bo_mem->mm_node;
|
||||
struct agp_memory *mem;
|
||||
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
|
||||
unsigned i;
|
||||
|
||||
mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
|
||||
mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
|
||||
if (unlikely(mem == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
mem->page_count = 0;
|
||||
for (cur_page = pages; cur_page < last_page; ++cur_page) {
|
||||
struct page *page = *cur_page;
|
||||
for (i = 0; i < ttm->num_pages; i++) {
|
||||
struct page *page = ttm->pages[i];
|
||||
|
||||
if (!page)
|
||||
page = dummy_read_page;
|
||||
page = ttm->dummy_read_page;
|
||||
|
||||
mem->pages[mem->page_count++] = page;
|
||||
}
|
||||
agp_be->mem = mem;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be =
|
||||
container_of(backend, struct ttm_agp_backend, backend);
|
||||
struct drm_mm_node *node = bo_mem->mm_node;
|
||||
struct agp_memory *mem = agp_be->mem;
|
||||
int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
|
||||
int ret;
|
||||
|
||||
mem->is_flushed = 1;
|
||||
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
|
||||
|
@ -90,50 +78,38 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int ttm_agp_unbind(struct ttm_backend *backend)
|
||||
static int ttm_agp_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be =
|
||||
container_of(backend, struct ttm_agp_backend, backend);
|
||||
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
|
||||
|
||||
if (agp_be->mem->is_bound)
|
||||
return agp_unbind_memory(agp_be->mem);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ttm_agp_clear(struct ttm_backend *backend)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be =
|
||||
container_of(backend, struct ttm_agp_backend, backend);
|
||||
struct agp_memory *mem = agp_be->mem;
|
||||
|
||||
if (mem) {
|
||||
ttm_agp_unbind(backend);
|
||||
agp_free_memory(mem);
|
||||
if (agp_be->mem) {
|
||||
if (agp_be->mem->is_bound)
|
||||
return agp_unbind_memory(agp_be->mem);
|
||||
agp_free_memory(agp_be->mem);
|
||||
agp_be->mem = NULL;
|
||||
}
|
||||
agp_be->mem = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ttm_agp_destroy(struct ttm_backend *backend)
|
||||
static void ttm_agp_destroy(struct ttm_tt *ttm)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be =
|
||||
container_of(backend, struct ttm_agp_backend, backend);
|
||||
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
|
||||
|
||||
if (agp_be->mem)
|
||||
ttm_agp_clear(backend);
|
||||
ttm_agp_unbind(ttm);
|
||||
kfree(agp_be);
|
||||
}
|
||||
|
||||
static struct ttm_backend_func ttm_agp_func = {
|
||||
.populate = ttm_agp_populate,
|
||||
.clear = ttm_agp_clear,
|
||||
.bind = ttm_agp_bind,
|
||||
.unbind = ttm_agp_unbind,
|
||||
.destroy = ttm_agp_destroy,
|
||||
};
|
||||
|
||||
struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
|
||||
struct agp_bridge_data *bridge)
|
||||
struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
|
||||
struct agp_bridge_data *bridge,
|
||||
unsigned long size, uint32_t page_flags,
|
||||
struct page *dummy_read_page)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be;
|
||||
|
||||
|
@ -143,10 +119,14 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
|
|||
|
||||
agp_be->mem = NULL;
|
||||
agp_be->bridge = bridge;
|
||||
agp_be->backend.func = &ttm_agp_func;
|
||||
agp_be->backend.bdev = bdev;
|
||||
return &agp_be->backend;
|
||||
agp_be->ttm.func = &ttm_agp_func;
|
||||
|
||||
if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return &agp_be->ttm;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_agp_backend_init);
|
||||
EXPORT_SYMBOL(ttm_agp_tt_create);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
|
|||
struct ttm_buffer_object *bo =
|
||||
container_of(list_kref, struct ttm_buffer_object, list_kref);
|
||||
struct ttm_bo_device *bdev = bo->bdev;
|
||||
size_t acc_size = bo->acc_size;
|
||||
|
||||
BUG_ON(atomic_read(&bo->list_kref.refcount));
|
||||
BUG_ON(atomic_read(&bo->kref.refcount));
|
||||
|
@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
|
|||
if (bo->destroy)
|
||||
bo->destroy(bo);
|
||||
else {
|
||||
ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
|
||||
kfree(bo);
|
||||
}
|
||||
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
|
||||
}
|
||||
|
||||
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
|
||||
|
@ -337,27 +338,11 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
|
|||
if (zero_alloc)
|
||||
page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
|
||||
case ttm_bo_type_kernel:
|
||||
bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
|
||||
page_flags, glob->dummy_read_page);
|
||||
bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
|
||||
page_flags, glob->dummy_read_page);
|
||||
if (unlikely(bo->ttm == NULL))
|
||||
ret = -ENOMEM;
|
||||
break;
|
||||
case ttm_bo_type_user:
|
||||
bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
|
||||
page_flags | TTM_PAGE_FLAG_USER,
|
||||
glob->dummy_read_page);
|
||||
if (unlikely(bo->ttm == NULL)) {
|
||||
ret = -ENOMEM;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = ttm_tt_set_user(bo->ttm, current,
|
||||
bo->buffer_start, bo->num_pages);
|
||||
if (unlikely(ret != 0)) {
|
||||
ttm_tt_destroy(bo->ttm);
|
||||
bo->ttm = NULL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
|
||||
ret = -EINVAL;
|
||||
|
@ -419,9 +404,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
|
|||
}
|
||||
}
|
||||
|
||||
if (bdev->driver->move_notify)
|
||||
bdev->driver->move_notify(bo, mem);
|
||||
|
||||
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
|
||||
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
|
||||
ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
|
||||
|
@ -434,6 +416,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
|
|||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
if (bdev->driver->move_notify)
|
||||
bdev->driver->move_notify(bo, mem);
|
||||
|
||||
moved:
|
||||
if (bo->evicted) {
|
||||
ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
|
||||
|
@ -472,6 +457,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
|
|||
|
||||
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
|
||||
{
|
||||
if (bo->bdev->driver->move_notify)
|
||||
bo->bdev->driver->move_notify(bo, NULL);
|
||||
|
||||
if (bo->ttm) {
|
||||
ttm_tt_unbind(bo->ttm);
|
||||
ttm_tt_destroy(bo->ttm);
|
||||
|
@ -907,16 +895,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
|
|||
}
|
||||
|
||||
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
|
||||
bool disallow_fixed,
|
||||
uint32_t mem_type,
|
||||
uint32_t proposed_placement,
|
||||
uint32_t *masked_placement)
|
||||
{
|
||||
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
|
||||
|
||||
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
|
||||
return false;
|
||||
|
||||
if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
|
||||
return false;
|
||||
|
||||
|
@ -961,7 +945,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
|
|||
man = &bdev->man[mem_type];
|
||||
|
||||
type_ok = ttm_bo_mt_compatible(man,
|
||||
bo->type == ttm_bo_type_user,
|
||||
mem_type,
|
||||
placement->placement[i],
|
||||
&cur_flags);
|
||||
|
@ -1009,7 +992,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
|
|||
if (!man->has_type)
|
||||
continue;
|
||||
if (!ttm_bo_mt_compatible(man,
|
||||
bo->type == ttm_bo_type_user,
|
||||
mem_type,
|
||||
placement->busy_placement[i],
|
||||
&cur_flags))
|
||||
|
@@ -1179,6 +1161,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
@@ -1249,14 +1242,34 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_bo_init);

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
				 PAGE_MASK;
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	return glob->ttm_bo_size + 2 * page_array_size;
	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
|
@@ -1270,10 +1283,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
{
	struct ttm_buffer_object *bo;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	size_t acc_size =
		ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;
|
@ -1459,13 +1472,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
|
|||
goto out_no_shrink;
|
||||
}
|
||||
|
||||
glob->ttm_bo_extra_size =
|
||||
ttm_round_pot(sizeof(struct ttm_tt)) +
|
||||
ttm_round_pot(sizeof(struct ttm_backend));
|
||||
|
||||
glob->ttm_bo_size = glob->ttm_bo_extra_size +
|
||||
ttm_round_pot(sizeof(struct ttm_buffer_object));
|
||||
|
||||
atomic_set(&glob->bo_count, 0);
|
||||
|
||||
ret = kobject_init_and_add(
|
||||
|
|
|
@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
|
|||
unsigned long page,
|
||||
pgprot_t prot)
|
||||
{
|
||||
struct page *d = ttm_tt_get_page(ttm, page);
|
||||
struct page *d = ttm->pages[page];
|
||||
void *dst;
|
||||
|
||||
if (!d)
|
||||
|
@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
|
|||
unsigned long page,
|
||||
pgprot_t prot)
|
||||
{
|
||||
struct page *s = ttm_tt_get_page(ttm, page);
|
||||
struct page *s = ttm->pages[page];
|
||||
void *src;
|
||||
|
||||
if (!s)
|
||||
|
@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
|
|||
if (old_iomap == NULL && ttm == NULL)
|
||||
goto out2;
|
||||
|
||||
if (ttm->state == tt_unpopulated) {
|
||||
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
|
||||
if (ret)
|
||||
goto out1;
|
||||
}
|
||||
|
||||
add = 0;
|
||||
dir = 1;
|
||||
|
||||
|
@ -439,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
|
|||
kref_init(&fbo->list_kref);
|
||||
kref_init(&fbo->kref);
|
||||
fbo->destroy = &ttm_transfered_destroy;
|
||||
fbo->acc_size = 0;
|
||||
|
||||
*new_obj = fbo;
|
||||
return 0;
|
||||
|
@ -502,10 +509,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
|
|||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
|
||||
struct ttm_tt *ttm = bo->ttm;
|
||||
struct page *d;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
BUG_ON(!ttm);
|
||||
|
||||
if (ttm->state == tt_unpopulated) {
|
||||
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
|
||||
/*
|
||||
* We're mapping a single page, and the desired
|
||||
|
@ -513,18 +526,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
|
|||
*/
|
||||
|
||||
map->bo_kmap_type = ttm_bo_map_kmap;
|
||||
map->page = ttm_tt_get_page(ttm, start_page);
|
||||
map->page = ttm->pages[start_page];
|
||||
map->virtual = kmap(map->page);
|
||||
} else {
|
||||
/*
|
||||
* Populate the part we're mapping;
|
||||
*/
|
||||
for (i = start_page; i < start_page + num_pages; ++i) {
|
||||
d = ttm_tt_get_page(ttm, i);
|
||||
if (!d)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to use vmap to get the desired page protection
|
||||
* or to make the buffer object look contiguous.
|
||||
|
|
|
@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|||
vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
|
||||
vm_get_page_prot(vma->vm_flags) :
|
||||
ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
|
||||
|
||||
/* Allocate all page at once, most common usage */
|
||||
if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
|
||||
retval = VM_FAULT_OOM;
|
||||
goto out_io_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Speculatively prefault a number of pages. Only error on
|
||||
* first page.
|
||||
*/
|
||||
|
||||
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
|
||||
if (bo->mem.bus.is_iomem)
|
||||
pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
|
||||
else {
|
||||
page = ttm_tt_get_page(ttm, page_offset);
|
||||
page = ttm->pages[page_offset];
|
||||
if (unlikely(!page && i == 0)) {
|
||||
retval = VM_FAULT_OOM;
|
||||
goto out_io_unlock;
|
||||
|
|
|
@@ -395,6 +395,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
			zone->name, (unsigned long long) zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);

@@ -409,6 +410,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
|
|||
* @return count of pages still required to fulfill the request.
|
||||
*/
|
||||
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
|
||||
struct list_head *pages, int ttm_flags,
|
||||
enum ttm_caching_state cstate, unsigned count)
|
||||
struct list_head *pages,
|
||||
int ttm_flags,
|
||||
enum ttm_caching_state cstate,
|
||||
unsigned count)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
struct list_head *p;
|
||||
|
@ -660,17 +662,67 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
|
|||
return count;
|
||||
}
|
||||
|
||||
/* Put all pages in pages list to correct pool to wait for reuse */
|
||||
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
|
||||
enum ttm_caching_state cstate)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
|
||||
unsigned i;
|
||||
|
||||
if (pool == NULL) {
|
||||
/* No pool for this memory type so free the pages */
|
||||
for (i = 0; i < npages; i++) {
|
||||
if (pages[i]) {
|
||||
if (page_count(pages[i]) != 1)
|
||||
printk(KERN_ERR TTM_PFX
|
||||
"Erroneous page count. "
|
||||
"Leaking pages.\n");
|
||||
__free_page(pages[i]);
|
||||
pages[i] = NULL;
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&pool->lock, irq_flags);
|
||||
for (i = 0; i < npages; i++) {
|
||||
if (pages[i]) {
|
||||
if (page_count(pages[i]) != 1)
|
||||
printk(KERN_ERR TTM_PFX
|
||||
"Erroneous page count. "
|
||||
"Leaking pages.\n");
|
||||
list_add_tail(&pages[i]->lru, &pool->list);
|
||||
pages[i] = NULL;
|
||||
pool->npages++;
|
||||
}
|
||||
}
|
||||
/* Check that we don't go over the pool limit */
|
||||
npages = 0;
|
||||
if (pool->npages > _manager->options.max_size) {
|
||||
npages = pool->npages - _manager->options.max_size;
|
||||
/* free at least NUM_PAGES_TO_ALLOC number of pages
|
||||
* to reduce calls to set_memory_wb */
|
||||
if (npages < NUM_PAGES_TO_ALLOC)
|
||||
npages = NUM_PAGES_TO_ALLOC;
|
||||
}
|
||||
spin_unlock_irqrestore(&pool->lock, irq_flags);
|
||||
if (npages)
|
||||
ttm_page_pool_free(pool, npages);
|
||||
}
|
||||
|
||||
/*
|
||||
* On success pages list will hold count number of correctly
|
||||
* cached pages.
|
||||
*/
|
||||
int ttm_get_pages(struct list_head *pages, int flags,
|
||||
enum ttm_caching_state cstate, unsigned count,
|
||||
dma_addr_t *dma_address)
|
||||
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
|
||||
enum ttm_caching_state cstate)
|
||||
{
|
||||
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
|
||||
struct list_head plist;
|
||||
struct page *p = NULL;
|
||||
gfp_t gfp_flags = GFP_USER;
|
||||
unsigned count;
|
||||
int r;
|
||||
|
||||
/* set zero flag for page allocation if required */
|
||||
|
@ -684,7 +736,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
|
|||
else
|
||||
gfp_flags |= GFP_HIGHUSER;
|
||||
|
||||
for (r = 0; r < count; ++r) {
|
||||
for (r = 0; r < npages; ++r) {
|
||||
p = alloc_page(gfp_flags);
|
||||
if (!p) {
|
||||
|
||||
|
@ -693,87 +745,53 @@ int ttm_get_pages(struct list_head *pages, int flags,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
list_add(&p->lru, pages);
|
||||
pages[r] = p;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* combine zero flag to pool flags */
|
||||
gfp_flags |= pool->gfp_flags;
|
||||
|
||||
/* First we take pages from the pool */
|
||||
count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
|
||||
INIT_LIST_HEAD(&plist);
|
||||
npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
|
||||
count = 0;
|
||||
list_for_each_entry(p, &plist, lru) {
|
||||
pages[count++] = p;
|
||||
}
|
||||
|
||||
/* clear the pages coming from the pool if requested */
|
||||
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
|
||||
list_for_each_entry(p, pages, lru) {
|
||||
list_for_each_entry(p, &plist, lru) {
|
||||
clear_page(page_address(p));
|
||||
}
|
||||
}
|
||||
|
||||
/* If pool didn't have enough pages allocate new one. */
|
||||
if (count > 0) {
|
||||
if (npages > 0) {
|
||||
/* ttm_alloc_new_pages doesn't reference pool so we can run
|
||||
* multiple requests in parallel.
|
||||
**/
|
||||
r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
|
||||
INIT_LIST_HEAD(&plist);
|
||||
r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
|
||||
list_for_each_entry(p, &plist, lru) {
|
||||
pages[count++] = p;
|
||||
}
|
||||
if (r) {
|
||||
/* If there is any pages in the list put them back to
|
||||
* the pool. */
|
||||
printk(KERN_ERR TTM_PFX
|
||||
"Failed to allocate extra pages "
|
||||
"for large request.");
|
||||
ttm_put_pages(pages, 0, flags, cstate, NULL);
|
||||
ttm_put_pages(pages, count, flags, cstate);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
           enum ttm_caching_state cstate, dma_addr_t *dma_address)
{
    unsigned long irq_flags;
    struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
    struct page *p, *tmp;

    if (pool == NULL) {
        /* No pool for this memory type so free the pages */

        list_for_each_entry_safe(p, tmp, pages, lru) {
            __free_page(p);
        }
        /* Make the pages list empty */
        INIT_LIST_HEAD(pages);
        return;
    }
    if (page_count == 0) {
        list_for_each_entry_safe(p, tmp, pages, lru) {
            ++page_count;
        }
    }

    spin_lock_irqsave(&pool->lock, irq_flags);
    list_splice_init(pages, &pool->list);
    pool->npages += page_count;
    /* Check that we don't go over the pool limit */
    page_count = 0;
    if (pool->npages > _manager->options.max_size) {
        page_count = pool->npages - _manager->options.max_size;
        /* free at least NUM_PAGES_TO_ALLOC number of pages
         * to reduce calls to set_memory_wb */
        if (page_count < NUM_PAGES_TO_ALLOC)
            page_count = NUM_PAGES_TO_ALLOC;
    }
    spin_unlock_irqrestore(&pool->lock, irq_flags);
    if (page_count)
        ttm_page_pool_free(pool, page_count);
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                      char *name)
{

@@ -836,6 +854,62 @@ void ttm_page_alloc_fini(void)
    _manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
    struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
    unsigned i;
    int ret;

    if (ttm->state != tt_unpopulated)
        return 0;

    for (i = 0; i < ttm->num_pages; ++i) {
        ret = ttm_get_pages(&ttm->pages[i], 1,
                    ttm->page_flags,
                    ttm->caching_state);
        if (ret != 0) {
            ttm_pool_unpopulate(ttm);
            return -ENOMEM;
        }

        ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                        false, false);
        if (unlikely(ret != 0)) {
            ttm_pool_unpopulate(ttm);
            return -ENOMEM;
        }
    }

    if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
        ret = ttm_tt_swapin(ttm);
        if (unlikely(ret != 0)) {
            ttm_pool_unpopulate(ttm);
            return ret;
        }
    }

    ttm->state = tt_unbound;
    return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
    unsigned i;

    for (i = 0; i < ttm->num_pages; ++i) {
        if (ttm->pages[i]) {
            ttm_mem_global_free_page(ttm->glob->mem_glob,
                         ttm->pages[i]);
            ttm_put_pages(&ttm->pages[i], 1,
                      ttm->page_flags,
                      ttm->caching_state);
        }
    }
    ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
    struct ttm_page_pool *p;
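The exported ttm_pool_populate()/ttm_pool_unpopulate() pair above is meant to be plugged directly into the new per-driver populate callbacks; the vmwgfx conversion later in this diff does exactly that. A minimal sketch of the wiring, using a hypothetical "foo" driver (only the TTM symbols are from this series):

/* Sketch only: "foo" is a made-up driver; ttm_pool_populate() and
 * ttm_pool_unpopulate() are the generic page-pool helpers added above. */
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
                                        unsigned long size, uint32_t page_flags,
                                        struct page *dummy_read_page);

static struct ttm_bo_driver foo_bo_driver = {
    .ttm_tt_create     = foo_ttm_tt_create,    /* driver specific, see ttm_tt_init() */
    .ttm_tt_populate   = ttm_pool_populate,    /* generic page-pool backed population */
    .ttm_tt_unpopulate = ttm_pool_unpopulate,  /* returns the pages to the pool */
    /* ... the remaining ttm_bo_driver members stay as before ... */
};

Drivers with DMA-address requirements would instead point the two callbacks at wrappers around the new DMA pool (see the CONFIG_SWIOTLB sketch near the end of this diff).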
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c (new file, 1137 lines added; diff suppressed because it is too large)
@@ -43,140 +43,21 @@
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
    ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
    ttm->dma_address = drm_calloc_large(ttm->num_pages,
    ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
    ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
    ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
                        sizeof(*ttm->dma_address));
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
    drm_free_large(ttm->pages);
    ttm->pages = NULL;
    drm_free_large(ttm->dma_address);
    ttm->dma_address = NULL;
}

static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
    int write;
    int dirty;
    struct page *page;
    int i;
    struct ttm_backend *be = ttm->be;

    BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
    write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
    dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

    if (be)
        be->func->clear(be);

    for (i = 0; i < ttm->num_pages; ++i) {
        page = ttm->pages[i];
        if (page == NULL)
            continue;

        if (page == ttm->dummy_read_page) {
            BUG_ON(write);
            continue;
        }

        if (write && dirty && !PageReserved(page))
            set_page_dirty_lock(page);

        ttm->pages[i] = NULL;
        ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
        put_page(page);
    }
    ttm->state = tt_unpopulated;
    ttm->first_himem_page = ttm->num_pages;
    ttm->last_lomem_page = -1;
}

static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
    struct page *p;
    struct list_head h;
    struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
    int ret;

    while (NULL == (p = ttm->pages[index])) {

        INIT_LIST_HEAD(&h);

        ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
                    &ttm->dma_address[index]);

        if (ret != 0)
            return NULL;

        p = list_first_entry(&h, struct page, lru);

        ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
        if (unlikely(ret != 0))
            goto out_err;

        if (PageHighMem(p))
            ttm->pages[--ttm->first_himem_page] = p;
        else
            ttm->pages[++ttm->last_lomem_page] = p;
    }
    return p;
out_err:
    put_page(p);
    return NULL;
}

struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
    int ret;

    if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
        ret = ttm_tt_swapin(ttm);
        if (unlikely(ret != 0))
            return NULL;
    }
    return __ttm_tt_get_page(ttm, index);
}

int ttm_tt_populate(struct ttm_tt *ttm)
{
    struct page *page;
    unsigned long i;
    struct ttm_backend *be;
    int ret;

    if (ttm->state != tt_unpopulated)
        return 0;

    if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
        ret = ttm_tt_swapin(ttm);
        if (unlikely(ret != 0))
            return ret;
    }

    be = ttm->be;

    for (i = 0; i < ttm->num_pages; ++i) {
        page = __ttm_tt_get_page(ttm, i);
        if (!page)
            return -ENOMEM;
    }

    be->func->populate(be, ttm->num_pages, ttm->pages,
               ttm->dummy_read_page, ttm->dma_address);
    ttm->state = tt_unbound;
    return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                      enum ttm_caching_state c_old,

@@ -278,153 +159,98 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
    int i;
    unsigned count = 0;
    struct list_head h;
    struct page *cur_page;
    struct ttm_backend *be = ttm->be;

    INIT_LIST_HEAD(&h);

    if (be)
        be->func->clear(be);
    for (i = 0; i < ttm->num_pages; ++i) {

        cur_page = ttm->pages[i];
        ttm->pages[i] = NULL;
        if (cur_page) {
            if (page_count(cur_page) != 1)
                printk(KERN_ERR TTM_PFX
                       "Erroneous page count. "
                       "Leaking pages.\n");
            ttm_mem_global_free_page(ttm->glob->mem_glob,
                         cur_page);
            list_add(&cur_page->lru, &h);
            count++;
        }
    }
    ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
              ttm->dma_address);
    ttm->state = tt_unpopulated;
    ttm->first_himem_page = ttm->num_pages;
    ttm->last_lomem_page = -1;
}

void ttm_tt_destroy(struct ttm_tt *ttm)
{
    struct ttm_backend *be;

    if (unlikely(ttm == NULL))
        return;

    be = ttm->be;
    if (likely(be != NULL)) {
        be->func->destroy(be);
        ttm->be = NULL;
    if (ttm->state == tt_bound) {
        ttm_tt_unbind(ttm);
    }

    if (likely(ttm->pages != NULL)) {
        if (ttm->page_flags & TTM_PAGE_FLAG_USER)
            ttm_tt_free_user_pages(ttm);
        else
            ttm_tt_free_alloced_pages(ttm);

        ttm_tt_free_page_directory(ttm);
        ttm->bdev->driver->ttm_tt_unpopulate(ttm);
    }

    if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
        ttm->swap_storage)
        fput(ttm->swap_storage);

    kfree(ttm);
    ttm->swap_storage = NULL;
    ttm->func->destroy(ttm);
}

int ttm_tt_set_user(struct ttm_tt *ttm,
            struct task_struct *tsk,
            unsigned long start, unsigned long num_pages)
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
        unsigned long size, uint32_t page_flags,
        struct page *dummy_read_page)
{
    struct mm_struct *mm = tsk->mm;
    int ret;
    int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
    struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

    BUG_ON(num_pages != ttm->num_pages);
    BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

    /**
     * Account user pages as lowmem pages for now.
     */

    ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
                   false, false);
    if (unlikely(ret != 0))
        return ret;

    down_read(&mm->mmap_sem);
    ret = get_user_pages(tsk, mm, start, num_pages,
                 write, 0, ttm->pages, NULL);
    up_read(&mm->mmap_sem);

    if (ret != num_pages && write) {
        ttm_tt_free_user_pages(ttm);
        ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
        return -ENOMEM;
    }

    ttm->tsk = tsk;
    ttm->start = start;
    ttm->state = tt_unbound;

    return 0;
}

struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                 uint32_t page_flags, struct page *dummy_read_page)
{
    struct ttm_bo_driver *bo_driver = bdev->driver;
    struct ttm_tt *ttm;

    if (!bo_driver)
        return NULL;

    ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
    if (!ttm)
        return NULL;

    ttm->bdev = bdev;
    ttm->glob = bdev->glob;
    ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    ttm->first_himem_page = ttm->num_pages;
    ttm->last_lomem_page = -1;
    ttm->caching_state = tt_cached;
    ttm->page_flags = page_flags;

    ttm->dummy_read_page = dummy_read_page;
    ttm->state = tt_unpopulated;

    ttm_tt_alloc_page_directory(ttm);
    if (!ttm->pages) {
        ttm_tt_destroy(ttm);
        printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
        return NULL;
        return -ENOMEM;
    }
    ttm->be = bo_driver->create_ttm_backend_entry(bdev);
    if (!ttm->be) {
        ttm_tt_destroy(ttm);
        printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
        return NULL;
    }
    ttm->state = tt_unpopulated;
    return ttm;
    return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
    drm_free_large(ttm->pages);
    ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
            unsigned long size, uint32_t page_flags,
            struct page *dummy_read_page)
{
    struct ttm_tt *ttm = &ttm_dma->ttm;

    ttm->bdev = bdev;
    ttm->glob = bdev->glob;
    ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    ttm->caching_state = tt_cached;
    ttm->page_flags = page_flags;
    ttm->dummy_read_page = dummy_read_page;
    ttm->state = tt_unpopulated;

    INIT_LIST_HEAD(&ttm_dma->pages_list);
    ttm_dma_tt_alloc_page_directory(ttm_dma);
    if (!ttm->pages || !ttm_dma->dma_address) {
        ttm_tt_destroy(ttm);
        printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
        return -ENOMEM;
    }
    return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
    struct ttm_tt *ttm = &ttm_dma->ttm;

    drm_free_large(ttm->pages);
    ttm->pages = NULL;
    drm_free_large(ttm_dma->dma_address);
    ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
    int ret;
    struct ttm_backend *be = ttm->be;

    if (ttm->state == tt_bound) {
        ret = be->func->unbind(be);
        ret = ttm->func->unbind(ttm);
        BUG_ON(ret);
        ttm->state = tt_unbound;
    }

@@ -433,7 +259,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
    int ret = 0;
    struct ttm_backend *be;

    if (!ttm)
        return -EINVAL;

@@ -441,25 +266,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
    if (ttm->state == tt_bound)
        return 0;

    be = ttm->be;

    ret = ttm_tt_populate(ttm);
    ret = ttm->bdev->driver->ttm_tt_populate(ttm);
    if (ret)
        return ret;

    ret = be->func->bind(be, bo_mem);
    ret = ttm->func->bind(ttm, bo_mem);
    if (unlikely(ret != 0))
        return ret;

    ttm->state = tt_bound;

    if (ttm->page_flags & TTM_PAGE_FLAG_USER)
        ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
    return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

static int ttm_tt_swapin(struct ttm_tt *ttm)
int ttm_tt_swapin(struct ttm_tt *ttm)
{
    struct address_space *swap_space;
    struct file *swap_storage;

@@ -470,16 +291,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
    int i;
    int ret = -ENOMEM;

    if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
        ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
                      ttm->num_pages);
        if (unlikely(ret != 0))
            return ret;

        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
        return 0;
    }

    swap_storage = ttm->swap_storage;
    BUG_ON(swap_storage == NULL);

@@ -491,7 +302,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
            ret = PTR_ERR(from_page);
            goto out_err;
        }
        to_page = __ttm_tt_get_page(ttm, i);
        to_page = ttm->pages[i];
        if (unlikely(to_page == NULL))
            goto out_err;

@@ -512,7 +323,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)

    return 0;
out_err:
    ttm_tt_free_alloced_pages(ttm);
    return ret;
}

@@ -530,18 +340,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
    BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
    BUG_ON(ttm->caching_state != tt_cached);

    /*
     * For user buffers, just unpin the pages, as there should be
     * vma references.
     */

    if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
        ttm_tt_free_user_pages(ttm);
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        ttm->swap_storage = NULL;
        return 0;
    }

    if (!persistent_swap_storage) {
        swap_storage = shmem_file_setup("ttm swap",
                        ttm->num_pages << PAGE_SHIFT,

@@ -576,7 +374,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
        page_cache_release(to_page);
    }

    ttm_tt_free_alloced_pages(ttm);
    ttm->bdev->driver->ttm_tt_unpopulate(ttm);
    ttm->swap_storage = swap_storage;
    ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
    if (persistent_swap_storage)
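With ttm_tt_create() and the userspace-backed TTM path removed from the core, each driver now embeds a struct ttm_tt inside its own object and calls ttm_tt_init()/ttm_tt_fini() from its ttm_tt_create and destroy callbacks. A hedged sketch of that pattern with a hypothetical "foo" driver (the real conversion is the vmwgfx hunk that follows):

#include "ttm/ttm_bo_driver.h"

struct foo_ttm_tt {
    struct ttm_tt ttm;      /* base object handed to the TTM core */
    /* driver-private state (e.g. an aperture slot id) would live here */
};

static void foo_ttm_destroy(struct ttm_tt *ttm)
{
    struct foo_ttm_tt *foo_tt = container_of(ttm, struct foo_ttm_tt, ttm);

    ttm_tt_fini(ttm);       /* frees the page directory set up by ttm_tt_init() */
    kfree(foo_tt);
}

static struct ttm_backend_func foo_backend_func = {
    /* .bind and .unbind are driver specific and omitted from this sketch */
    .destroy = foo_ttm_destroy,
};

static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
                                        unsigned long size, uint32_t page_flags,
                                        struct page *dummy_read_page)
{
    struct foo_ttm_tt *foo_tt = kzalloc(sizeof(*foo_tt), GFP_KERNEL);

    if (!foo_tt)
        return NULL;

    foo_tt->ttm.func = &foo_backend_func;
    if (ttm_tt_init(&foo_tt->ttm, bdev, size, page_flags, dummy_read_page)) {
        kfree(foo_tt);
        return NULL;
    }
    return &foo_tt->ttm;
}

Note that the destroy callback must not call ttm_tt_destroy() itself; the core calls into the driver, as the updated ttm_backend_func documentation further down warns.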
@@ -28,6 +28,7 @@
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
    TTM_PL_FLAG_CACHED;

@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = {
    .busy_placement = gmr_vram_placement_flags
};

struct vmw_ttm_backend {
    struct ttm_backend backend;
    struct page **pages;
    unsigned long num_pages;
struct vmw_ttm_tt {
    struct ttm_tt ttm;
    struct vmw_private *dev_priv;
    int gmr_id;
};

static int vmw_ttm_populate(struct ttm_backend *backend,
                unsigned long num_pages, struct page **pages,
                struct page *dummy_read_page,
                dma_addr_t *dma_addrs)
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
    struct vmw_ttm_backend *vmw_be =
        container_of(backend, struct vmw_ttm_backend, backend);

    vmw_be->pages = pages;
    vmw_be->num_pages = num_pages;

    return 0;
}

static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
    struct vmw_ttm_backend *vmw_be =
        container_of(backend, struct vmw_ttm_backend, backend);
    struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

    vmw_be->gmr_id = bo_mem->start;

    return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
                vmw_be->num_pages, vmw_be->gmr_id);
    return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
                ttm->num_pages, vmw_be->gmr_id);
}

static int vmw_ttm_unbind(struct ttm_backend *backend)
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
    struct vmw_ttm_backend *vmw_be =
        container_of(backend, struct vmw_ttm_backend, backend);
    struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

    vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
    return 0;
}

static void vmw_ttm_clear(struct ttm_backend *backend)
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
    struct vmw_ttm_backend *vmw_be =
        container_of(backend, struct vmw_ttm_backend, backend);

    vmw_be->pages = NULL;
    vmw_be->num_pages = 0;
}

static void vmw_ttm_destroy(struct ttm_backend *backend)
{
    struct vmw_ttm_backend *vmw_be =
        container_of(backend, struct vmw_ttm_backend, backend);
    struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

    ttm_tt_fini(ttm);
    kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
    .populate = vmw_ttm_populate,
    .clear = vmw_ttm_clear,
    .bind = vmw_ttm_bind,
    .unbind = vmw_ttm_unbind,
    .destroy = vmw_ttm_destroy,
};

struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
                 unsigned long size, uint32_t page_flags,
                 struct page *dummy_read_page)
{
    struct vmw_ttm_backend *vmw_be;
    struct vmw_ttm_tt *vmw_be;

    vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
    if (!vmw_be)
        return NULL;

    vmw_be->backend.func = &vmw_ttm_func;
    vmw_be->ttm.func = &vmw_ttm_func;
    vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

    return &vmw_be->backend;
    if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
        kfree(vmw_be);
        return NULL;
    }

    return &vmw_be->ttm;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)

@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
}

struct ttm_bo_driver vmw_bo_driver = {
    .create_ttm_backend_entry = vmw_ttm_backend_init,
    .ttm_tt_create = &vmw_ttm_tt_create,
    .ttm_tt_populate = &ttm_pool_populate,
    .ttm_tt_unpopulate = &ttm_pool_unpopulate,
    .invalidate_caches = vmw_invalidate_caches,
    .init_mem_type = vmw_init_mem_type,
    .evict_flags = vmw_evict_flags,
@@ -1517,29 +1517,10 @@ int vmw_surface_check(struct vmw_private *dev_priv,
/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
                  unsigned long num_pages)
{
    static size_t bo_user_size = ~0;

    size_t page_array_size =
        (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

    if (unlikely(bo_user_size == ~0)) {
        bo_user_size = glob->ttm_bo_extra_size +
            ttm_round_pot(sizeof(struct vmw_dma_buffer));
    }

    return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
    struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
    struct ttm_bo_global *glob = bo->glob;

    ttm_mem_global_free(glob->mem_glob, bo->acc_size);
    kfree(vmw_bo);
}

@@ -1550,24 +1531,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
            void (*bo_free) (struct ttm_buffer_object *bo))
{
    struct ttm_bo_device *bdev = &dev_priv->bdev;
    struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
    size_t acc_size;
    int ret;

    BUG_ON(!bo_free);

    acc_size =
        vmw_dmabuf_acc_size(bdev->glob,
                    (size + PAGE_SIZE - 1) >> PAGE_SHIFT);

    ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
    if (unlikely(ret != 0)) {
        /* we must free the bo here as
         * ttm_buffer_object_init does so as well */
        bo_free(&vmw_bo->base);
        return ret;
    }

    acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
    memset(vmw_bo, 0, sizeof(*vmw_bo));

    INIT_LIST_HEAD(&vmw_bo->validate_list);

@@ -1582,9 +1551,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
    struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
    struct ttm_bo_global *glob = bo->glob;

    ttm_mem_global_free(glob->mem_glob, bo->acc_size);
    kfree(vmw_user_bo);
}
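The vmw_dmabuf_init() hunk above is the "simplify memory accounting" part of the series: the driver's hand-rolled size estimate and explicit ttm_mem_global_alloc()/free() calls are replaced by the new ttm_bo_acc_size() helper, whose result is passed straight to ttm_bo_init() as the acc_size argument. A minimal sketch of the general pattern; everything named "foo" is a placeholder, and the argument order follows the ttm_bo_init() prototype of this tree:

#include "ttm/ttm_bo_api.h"

struct foo_bo {
    struct ttm_buffer_object base;
};

static void foo_bo_destroy(struct ttm_buffer_object *bo)
{
    kfree(container_of(bo, struct foo_bo, base));
}

static int foo_bo_create(struct ttm_bo_device *bdev, unsigned long size,
                         struct ttm_placement *placement, struct foo_bo *fbo)
{
    size_t acc_size;

    /* account for TTM's per-BO overhead plus the driver's own BO struct */
    acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct foo_bo));

    return ttm_bo_init(bdev, &fbo->base, size, ttm_bo_type_device,
                       placement, 0, 0, false, NULL, acc_size,
                       foo_bo_destroy);
}

Drivers that embed a struct ttm_dma_tt would use ttm_bo_dma_acc_size() instead, so the dma_address array is accounted as well.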
@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose)
    char *m = NULL;
    unsigned int repeat = 3;

    nr_tbl = swioltb_nr_tbl();
    nr_tbl = swiotlb_nr_tbl();
    if (nr_tbl)
        xen_io_tlb_nslabs = nr_tbl;
    else {
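Besides fixing the swioltb/swiotlb typo, the series exports the renamed helper to modules (see the EXPORT_SYMBOL_GPL hunk near the end of this diff), so a driver module can now ask whether the software IOTLB has any bounce-buffer slabs at all. A trivial, illustrative check ("foo" is hypothetical):

#include <linux/swiotlb.h>

/* Returns true when swiotlb has allocated its bounce-buffer slabs, which is
 * the condition the TTM DMA pool selection keys off. */
static bool foo_swiotlb_active(void)
{
    return swiotlb_nr_tbl() != 0;
}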
@@ -122,17 +122,12 @@ struct ttm_mem_reg {
 * be mmapped by user space. Each of these bos occupy a slot in the
 * device address space, that can be used for normal vm operations.
 *
 * @ttm_bo_type_user: These are user-space memory areas that are made
 * available to the GPU by mapping the buffer pages into the GPU aperture
 * space. These buffers cannot be mmaped from the device address space.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 */

enum ttm_bo_type {
    ttm_bo_type_device,
    ttm_bo_type_user,
    ttm_bo_type_kernel
};

@@ -434,9 +429,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
 * -EBUSY if the buffer is busy and no_wait is true.
 * -ERESTARTSYS if interrupted by a signal.
 */

extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_synccpu_write_release:
 *

@@ -446,6 +441,22 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
 */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);

/**
 * ttm_bo_acc_size
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo_size: size of the buffer object in byte.
 * @struct_size: size of the structure holding buffer object datas
 *
 * Returns size to account for a buffer object
 */
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
               unsigned long bo_size,
               unsigned struct_size);
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
               unsigned long bo_size,
               unsigned struct_size);

/**
 * ttm_bo_init
 *

@@ -493,6 +504,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
            struct file *persistent_swap_storage,
            size_t acc_size,
            void (*destroy) (struct ttm_buffer_object *));

/**
 * ttm_bo_synccpu_object_init
 *
@@ -42,37 +42,10 @@
struct ttm_backend;

struct ttm_backend_func {
    /**
     * struct ttm_backend_func member populate
     *
     * @backend: Pointer to a struct ttm_backend.
     * @num_pages: Number of pages to populate.
     * @pages: Array of pointers to ttm pages.
     * @dummy_read_page: Page to be used instead of NULL pages in the
     * array @pages.
     * @dma_addrs: Array of DMA (bus) address of the ttm pages.
     *
     * Populate the backend with ttm pages. Depending on the backend,
     * it may or may not copy the @pages array.
     */
    int (*populate) (struct ttm_backend *backend,
             unsigned long num_pages, struct page **pages,
             struct page *dummy_read_page,
             dma_addr_t *dma_addrs);
    /**
     * struct ttm_backend_func member clear
     *
     * @backend: Pointer to a struct ttm_backend.
     *
     * This is an "unpopulate" function. Release all resources
     * allocated with populate.
     */
    void (*clear) (struct ttm_backend *backend);

    /**
     * struct ttm_backend_func member bind
     *
     * @backend: Pointer to a struct ttm_backend.
     * @ttm: Pointer to a struct ttm_tt.
     * @bo_mem: Pointer to a struct ttm_mem_reg describing the
     * memory type and location for binding.
     *

@@ -80,46 +53,29 @@ struct ttm_backend_func {
     * indicated by @bo_mem. This function should be able to handle
     * differences between aperture and system page sizes.
     */
    int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
    int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

    /**
     * struct ttm_backend_func member unbind
     *
     * @backend: Pointer to a struct ttm_backend.
     * @ttm: Pointer to a struct ttm_tt.
     *
     * Unbind previously bound backend pages. This function should be
     * able to handle differences between aperture and system page sizes.
     */
    int (*unbind) (struct ttm_backend *backend);
    int (*unbind) (struct ttm_tt *ttm);

    /**
     * struct ttm_backend_func member destroy
     *
     * @backend: Pointer to a struct ttm_backend.
     * @ttm: Pointer to a struct ttm_tt.
     *
     * Destroy the backend.
     * Destroy the backend. This will be call back from ttm_tt_destroy so
     * don't call ttm_tt_destroy from the callback or infinite loop.
     */
    void (*destroy) (struct ttm_backend *backend);
    void (*destroy) (struct ttm_tt *ttm);
};

/**
 * struct ttm_backend
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @flags: For driver use.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 *
 */

struct ttm_backend {
    struct ttm_bo_device *bdev;
    uint32_t flags;
    struct ttm_backend_func *func;
};

#define TTM_PAGE_FLAG_USER (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
#define TTM_PAGE_FLAG_WRITE (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)

@@ -135,23 +91,18 @@ enum ttm_caching_state {
/**
 * struct ttm_tt
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @first_himem_page: Himem pages are put last in the page array, which
 * enables us to run caching attribute changes on only the first part
 * of the page array containing lomem pages. This is the index of the
 * first himem page.
 * @last_lomem_page: Index of the last lomem page in the page array.
 * @num_pages: Number of pages in the page array.
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @be: Pointer to the ttm backend.
 * @tsk: The task for user ttm.
 * @start: virtual address for user ttm.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)

@@ -159,16 +110,14 @@ enum ttm_caching_state {
 */

struct ttm_tt {
    struct ttm_bo_device *bdev;
    struct ttm_backend_func *func;
    struct page *dummy_read_page;
    struct page **pages;
    long first_himem_page;
    long last_lomem_page;
    uint32_t page_flags;
    unsigned long num_pages;
    struct ttm_bo_global *glob;
    struct ttm_backend *be;
    struct task_struct *tsk;
    unsigned long start;
    struct file *swap_storage;
    enum ttm_caching_state caching_state;
    enum {

@@ -176,7 +125,23 @@ struct ttm_tt {
        tt_unbound,
        tt_unpopulated,
    } state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages
 * @pages_list: used by some page allocation backend
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
    struct ttm_tt ttm;
    dma_addr_t *dma_address;
    struct list_head pages_list;
};

#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */

@@ -351,15 +316,42 @@ struct ttm_mem_type_manager {

struct ttm_bo_driver {
    /**
     * struct ttm_bo_driver member create_ttm_backend_entry
     * ttm_tt_create
     *
     * @bdev: The buffer object device.
     * @bdev: pointer to a struct ttm_bo_device:
     * @size: Size of the data needed backing.
     * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
     * @dummy_read_page: See struct ttm_bo_device.
     *
     * Create a driver specific struct ttm_backend.
     * Create a struct ttm_tt to back data with system memory pages.
     * No pages are actually allocated.
     * Returns:
     * NULL: Out of memory.
     */
    struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
                    unsigned long size,
                    uint32_t page_flags,
                    struct page *dummy_read_page);

    struct ttm_backend *(*create_ttm_backend_entry)
     (struct ttm_bo_device *bdev);
    /**
     * ttm_tt_populate
     *
     * @ttm: The struct ttm_tt to contain the backing pages.
     *
     * Allocate all backing pages
     * Returns:
     * -ENOMEM: Out of memory.
     */
    int (*ttm_tt_populate)(struct ttm_tt *ttm);

    /**
     * ttm_tt_unpopulate
     *
     * @ttm: The struct ttm_tt to contain the backing pages.
     *
     * Free all backing page
     */
    void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

    /**
     * struct ttm_bo_driver member invalidate_caches

@@ -477,9 +469,6 @@ struct ttm_bo_global_ref {
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
 * used by a buffer object. This is excluding page arrays and backing pages.
 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.

@@ -497,8 +486,6 @@ struct ttm_bo_global {
    struct ttm_mem_global *mem_glob;
    struct page *dummy_read_page;
    struct ttm_mem_shrink shrink;
    size_t ttm_bo_extra_size;
    size_t ttm_bo_size;
    struct mutex device_list_mutex;
    spinlock_t lru_lock;

@@ -600,8 +587,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
}

/**
 * ttm_tt_create
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device:
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.

@@ -612,28 +600,22 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
 * Returns:
 * NULL: Out of memory.
 */
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
                    unsigned long size,
                    uint32_t page_flags,
                    struct page *dummy_read_page);
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
               unsigned long size, uint32_t page_flags,
               struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
               unsigned long size, uint32_t page_flags,
               struct page *dummy_read_page);

/**
 * ttm_tt_set_user:
 * ttm_tt_fini
 *
 * @ttm: The struct ttm_tt to populate.
 * @tsk: A struct task_struct for which @start is a valid user-space address.
 * @start: A valid user-space address.
 * @num_pages: Size in pages of the user memory area.
 * @ttm: the ttm_tt structure.
 *
 * Populate a struct ttm_tt with a user-space memory area after first pinning
 * the pages backing it.
 * Returns:
 * !0: Error.
 * Free memory of ttm_tt structure
 */

extern int ttm_tt_set_user(struct ttm_tt *ttm,
               struct task_struct *tsk,
               unsigned long start, unsigned long num_pages);
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

/**
 * ttm_ttm_bind:

@@ -645,21 +627,12 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_populate:
 *
 * @ttm: The struct ttm_tt to contain the backing pages.
 *
 * Add backing pages to all of @ttm
 */
extern int ttm_tt_populate(struct ttm_tt *ttm);

/**
 * ttm_ttm_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy a struct ttm_tt.
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

@@ -673,19 +646,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_ttm_destroy:
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 * @index: Index of the desired page.
 *
 * Return a pointer to the struct page backing @ttm at page
 * index @index. If the page is unpopulated, one will be allocated to
 * populate that index.
 *
 * Returns:
 * NULL on OOM.
 * Swap in a previously swap out ttm_tt.
 */
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_cache_flush:

@@ -1046,17 +1013,23 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
#include <linux/agp_backend.h>

/**
 * ttm_agp_backend_init
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
                        struct agp_bridge_data *bridge);
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
                    struct agp_bridge_data *bridge,
                    unsigned long size, uint32_t page_flags,
                    struct page *dummy_read_page);
#endif

#endif
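For drivers that need bus addresses, the struct ttm_dma_tt introduced above wraps the base ttm_tt together with the dma_address array that used to live in ttm_tt itself. Since the backend callbacks now receive only the ttm_tt pointer, the driver reaches its DMA state through container_of(). A hedged sketch with a hypothetical "foo" driver:

#include "ttm/ttm_bo_driver.h"

struct foo_dma_tt {
    struct ttm_dma_tt dma;  /* dma.ttm is the ttm_tt handed to the callbacks */
    int slot_id;
};

static int foo_dma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
    struct foo_dma_tt *foo_tt =
        container_of(ttm, struct foo_dma_tt, dma.ttm);
    dma_addr_t *bus_addrs = foo_tt->dma.dma_address;

    /* program the aperture slot at bo_mem->start with
     * bus_addrs[0 .. ttm->num_pages), driver specific and omitted here */
    (void)bus_addrs;
    foo_tt->slot_id = bo_mem->start;
    return 0;
}

The matching ttm_tt_create callback would call ttm_dma_tt_init() on &foo_tt->dma (and ttm_dma_tt_fini() in destroy), so both the pages and dma_address arrays are sized consistently.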
@@ -29,35 +29,6 @@
#include "ttm_bo_driver.h"
#include "ttm_memory.h"

/**
 * Get count number of pages from pool to pages list.
 *
 * @pages: head of empty linked list where pages are filled.
 * @flags: ttm flags for page allocation.
 * @cstate: ttm caching state for the page.
 * @count: number of pages to allocate.
 * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
 */
int ttm_get_pages(struct list_head *pages,
          int flags,
          enum ttm_caching_state cstate,
          unsigned count,
          dma_addr_t *dma_address);
/**
 * Put linked list of pages to pool.
 *
 * @pages: list of pages to free.
 * @page_count: number of pages in the list. Zero can be passed for unknown
 * count.
 * @flags: ttm flags for page allocation.
 * @cstate: ttm caching state.
 * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
 */
void ttm_put_pages(struct list_head *pages,
           unsigned page_count,
           int flags,
           enum ttm_caching_state cstate,
           dma_addr_t *dma_address);
/**
 * Initialize pool allocator.
 */

@@ -67,8 +38,62 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
 */
void ttm_page_alloc_fini(void);

/**
 * ttm_pool_populate:
 *
 * @ttm: The struct ttm_tt to contain the backing pages.
 *
 * Add backing pages to all of @ttm
 */
extern int ttm_pool_populate(struct ttm_tt *ttm);

/**
 * ttm_pool_unpopulate:
 *
 * @ttm: The struct ttm_tt which to free backing pages.
 *
 * Free all pages of @ttm
 */
extern void ttm_pool_unpopulate(struct ttm_tt *ttm);

/**
 * Output the state of pools to debugfs file
 */
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);


#ifdef CONFIG_SWIOTLB
/**
 * Initialize pool allocator.
 */
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);

/**
 * Free pool allocator.
 */
void ttm_dma_page_alloc_fini(void);

/**
 * Output the state of pools to debugfs file
 */
extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);

extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);

#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
                      unsigned max_pages)
{
    return -ENODEV;
}

static inline void ttm_dma_page_alloc_fini(void) { return; }

static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
    return 0;
}
#endif

#endif
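The CONFIG_SWIOTLB block gives DMA-aware drivers a parallel pair, ttm_dma_populate()/ttm_dma_unpopulate(), plus stubs when swiotlb is compiled out. Per the commit subjects, radeon and nouveau only take the DMA-pool path when swiotlb is actually active. A hedged sketch of that kind of populate callback; "foo" and foo_get_dma_device() are hypothetical, and the sketch assumes the driver embeds a struct ttm_dma_tt:

#include <linux/swiotlb.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

/* Hypothetical helper: returns the struct device used for DMA mapping. */
static struct device *foo_get_dma_device(struct ttm_bo_device *bdev);

static int foo_ttm_tt_populate(struct ttm_tt *ttm)
{
    /* valid only because this driver's ttm_tt_create embeds a ttm_dma_tt */
    struct ttm_dma_tt *dma_tt = container_of(ttm, struct ttm_dma_tt, ttm);

#ifdef CONFIG_SWIOTLB
    /* bounce buffering is in play: use the coherent DMA page pool */
    if (swiotlb_nr_tbl())
        return ttm_dma_populate(dma_tt, foo_get_dma_device(ttm->bdev));
#endif
    return ttm_pool_populate(ttm);
}

The unpopulate callback mirrors this, calling ttm_dma_unpopulate() or ttm_pool_unpopulate() along the same branch.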
@@ -24,7 +24,7 @@ extern int swiotlb_force;

extern void swiotlb_init(int verbose);
extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swioltb_nr_tbl(void);
extern unsigned long swiotlb_nr_tbl(void);

/*
 * Enumeration for sync targets

@@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str)
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */

unsigned long swioltb_nr_tbl(void)
unsigned long swiotlb_nr_tbl(void)
{
    return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                      volatile void *address)

@@ -321,6 +321,7 @@ void __init swiotlb_free(void)
        free_bootmem_late(__pa(io_tlb_start),
                  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
    }
    io_tlb_nslabs = 0;
}

static int is_swiotlb_buffer(phys_addr_t paddr)