drm/nv50: tidy up PCIEGART implementation
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit b571fe21f5
parent 5f6fdca570
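In short, NV50 PCIEGART no longer reserves a fixed vm_gart_base window in the channel VM or a per-channel vm_gart_pt page table; nouveau_sgdma_init() now carves a VMA out of the shared chan_vm, and the new nv50_sgdma_bind()/unbind() hooks go through the VM layer. A condensed sketch of the new flow, pulled from the hunks below (not standalone code):

    /* nouveau_sgdma_init(): allocate a 512MiB VMA from the shared channel VM
     * and use its offset as the GART aperture base. */
    ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024, 12,
                         NV_MEM_ACCESS_RW, &dev_priv->gart_info.vma);
    dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
    dev_priv->gart_info.aper_size = 512 * 1024 * 1024;

    /* nv50_sgdma_bind()/unbind(): map or unmap the backend's pages via the VM
     * layer instead of writing GART PTEs into the sg ctxdma by hand. */
    nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
                      nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
    nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
                        nvbe->nr_pages << PAGE_SHIFT);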
@@ -425,7 +425,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
 		man->func = &ttm_bo_manager_func;
@@ -441,13 +440,13 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 				     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
+			man->gpu_offset = dev_priv->gart_info.aper_base;
 			break;
 		default:
 			NV_ERROR(dev, "Unknown GART type: %d\n",
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -531,12 +530,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (old_mem->mem_type == TTM_PL_VRAM)
 			src_offset = nvbo->vma.offset;
 		else
-			src_offset += dev_priv->vm_gart_base;
+			src_offset += dev_priv->gart_info.aper_base;
 
 		if (new_mem->mem_type == TTM_PL_VRAM)
 			dst_offset = nvbo->vma.offset;
 		else
-			dst_offset += dev_priv->vm_gart_base;
+			dst_offset += dev_priv->gart_info.aper_base;
 	}
 
 	ret = RING_SPACE(chan, 3);
@@ -248,7 +248,6 @@ struct nouveau_channel {
 	/* NV50 VM */
 	struct nouveau_vm *vm;
 	struct nouveau_gpuobj *vm_pd;
-	struct nouveau_gpuobj *vm_gart_pt;
 
 	/* Objects */
 	struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -684,6 +683,7 @@ struct drm_nouveau_private {
 		uint64_t aper_free;
 
 		struct nouveau_gpuobj *sg_ctxdma;
+		struct nouveau_vma vma;
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
@@ -709,8 +709,6 @@ struct drm_nouveau_private {
 
 	/* G8x/G9x virtual address space */
 	struct nouveau_vm *chan_vm;
-	uint64_t vm_gart_base;
-	uint64_t vm_gart_size;
 
 	struct nvbios vbios;
 
@@ -433,7 +433,7 @@ nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
 		flags0 |= 0x00030000;
 		break;
 	case NV_MEM_TARGET_GART:
-		base += dev_priv->vm_gart_base;
+		base += dev_priv->gart_info.aper_base;
 	default:
 		flags0 &= ~0x00100000;
 		break;
@@ -801,7 +801,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 			return ret;
 
 		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
-		chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma);
 	}
 
 	/* RAMHT */
@@ -889,7 +888,6 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 
 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
 
 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
 	dma_addr_t *pages;
 	unsigned nr_pages;
 
-	unsigned pte_start;
+	u64 offset;
 	bool bound;
 };
 
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 		}
 	}
 
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
-	if (dev_priv->card_type < NV_50)
-		return pte + 2;
-
-	return pte << 1;
-}
-
 static int
 nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
-	nvbe->pte_start = pte;
+	nvbe->offset = mem->start << PAGE_SHIFT;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
-		uint32_t offset_h = upper_32_bits(dma_offset);
-
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
-				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
-				pte += 2;
-			}
-
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
 	dev_priv->engine.instmem.flush(nvbe->dev);
 
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}
-
 	nvbe->bound = true;
 	return 0;
@@ -142,24 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	if (!nvbe->bound)
 		return 0;
 
-	pte = nvbe->pte_start;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
-				pte += 2;
-			}
-		}
-	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
 	nvbe->bound = false;
@@ -182,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
 	}
 }
 
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	nvbe->offset = mem->start << PAGE_SHIFT;
+
+	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	if (!nvbe->bound)
+		return 0;
+
+	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+			    nvbe->nr_pages << PAGE_SHIFT);
+	nvbe->bound = false;
+	return 0;
+}
+
 static struct ttm_backend_func nouveau_sgdma_backend = {
 	.populate = nouveau_sgdma_populate,
 	.clear = nouveau_sgdma_clear,
@@ -190,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
 	.destroy = nouveau_sgdma_destroy
 };
 
+static struct ttm_backend_func nv50_sgdma_backend = {
+	.populate = nouveau_sgdma_populate,
+	.clear = nouveau_sgdma_clear,
+	.bind = nv50_sgdma_bind,
+	.unbind = nv50_sgdma_unbind,
+	.destroy = nouveau_sgdma_destroy
+};
+
 struct ttm_backend *
 nouveau_sgdma_init_ttm(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_sgdma_be *nvbe;
 
-	if (!dev_priv->gart_info.sg_ctxdma)
-		return NULL;
-
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;
 
 	nvbe->dev = dev;
 
-	nvbe->backend.func = &nouveau_sgdma_backend;
-
+	if (dev_priv->card_type < NV_50)
+		nvbe->backend.func = &nouveau_sgdma_backend;
+	else
+		nvbe->backend.func = &nv50_sgdma_backend;
 	return &nvbe->backend;
 }
 
@@ -226,21 +221,15 @@ nouveau_sgdma_init(struct drm_device *dev)
 
 		obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
-	} else {
-		/* 1 entire VM page table */
-		aper_size = (512 * 1024 * 1024);
-		obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
-	}
 
-	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-				 NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-		return ret;
-	}
+		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+					 NVOBJ_FLAG_ZERO_ALLOC |
+					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+		if (ret) {
+			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+			return ret;
+		}
 
-	if (dev_priv->card_type < NV_50) {
 		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 				   (1 << 12) /* PT present */ |
 				   (0 << 13) /* PT *not* linear */ |
@@ -249,18 +238,23 @@ nouveau_sgdma_init(struct drm_device *dev)
 		nv_wo32(gpuobj, 4, aper_size - 1);
 		for (i = 2; i < 2 + (aper_size >> 12); i++)
 			nv_wo32(gpuobj, i * 4, 0x00000000);
-	} else {
-		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(gpuobj, i + 0, 0x00000000);
-			nv_wo32(gpuobj, i + 4, 0x00000000);
-		}
+
+		dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.aper_base = 0;
+		dev_priv->gart_info.aper_size = aper_size;
+	} else
+	if (dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+				     12, NV_MEM_ACCESS_RW,
+				     &dev_priv->gart_info.vma);
+		if (ret)
+			return ret;
+
+		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
 	}
-
 	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
-	dev_priv->gart_info.aper_base = 0;
-	dev_priv->gart_info.aper_size = aper_size;
-	dev_priv->gart_info.sg_ctxdma = gpuobj;
 	return 0;
 }
@@ -270,6 +264,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+	nouveau_vm_put(&dev_priv->gart_info.vma);
 }
 
 int
@@ -131,7 +131,6 @@ nv50_instmem_init(struct drm_device *dev)
 	struct nouveau_channel *chan;
 	struct nouveau_vm *vm;
 	int ret, i;
-	u64 nongart_o;
 	u32 tmp;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -216,15 +215,10 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);
 
-	/* Create shared channel VM, space is reserved for GART mappings at
-	 * the beginning of this address space, it's managed separately
-	 * because TTM makes life painful
+	/* Create shared channel VM, space is reserved at the beginning
+	 * to catch "NULL pointer" references
 	 */
-	dev_priv->vm_gart_base = 0x0020000000ULL;
-	dev_priv->vm_gart_size = 512 * 1024 * 1024;
-	nongart_o = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
-
-	ret = nouveau_vm_new(dev, 0, (1ULL << 40), nongart_o,
+	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
 			     29, 12, 16, &dev_priv->chan_vm);
 	if (ret)
 		return ret;