drm/nouveau/fifo: split user classes out from engine implementations
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 0ce41e3c66
commit 9a65a38c45
@@ -18,30 +18,6 @@ nvkm_fifo_chan(void *obj)
	return (void *)nv_namedb(obj);
}

#define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
	nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
				  (m), sizeof(**d), (void **)d)
#define nvkm_fifo_channel_init(p) \
	nvkm_namedb_init(&(p)->namedb)
#define nvkm_fifo_channel_fini(p,s) \
	nvkm_namedb_fini(&(p)->namedb, (s))

int nvkm_fifo_channel_create_(struct nvkm_object *,
			      struct nvkm_object *,
			      struct nvkm_oclass *,
			      int bar, u32 addr, u32 size, u64 push,
			      u64 engmask, int len, void **);
void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *);

#define _nvkm_fifo_channel_init _nvkm_namedb_init
#define _nvkm_fifo_channel_fini _nvkm_namedb_fini

void _nvkm_fifo_channel_dtor(struct nvkm_object *);
int _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *);
u32 _nvkm_fifo_channel_rd32(struct nvkm_object *, u64);
void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32);
int _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **);

#include <core/gpuobj.h>

struct nvkm_fifo_base {
@@ -11,3 +11,20 @@ nvkm-y += nvkm/engine/fifo/gk20a.o
nvkm-y += nvkm/engine/fifo/gk208.o
nvkm-y += nvkm/engine/fifo/gm204.o
nvkm-y += nvkm/engine/fifo/gm20b.o

nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
nvkm-y += nvkm/engine/fifo/chang84.o

nvkm-y += nvkm/engine/fifo/dmanv04.o
nvkm-y += nvkm/engine/fifo/dmanv10.o
nvkm-y += nvkm/engine/fifo/dmanv17.o
nvkm-y += nvkm/engine/fifo/dmanv40.o
nvkm-y += nvkm/engine/fifo/dmanv50.o
nvkm-y += nvkm/engine/fifo/dmag84.o

nvkm-y += nvkm/engine/fifo/gpfifonv50.o
nvkm-y += nvkm/engine/fifo/gpfifog84.o
nvkm-y += nvkm/engine/fifo/gpfifogf100.o
nvkm-y += nvkm/engine/fifo/gpfifogk104.o
nvkm-y += nvkm/engine/fifo/gpfifogm204.o
@@ -21,14 +21,12 @@
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>
#include "priv.h"
#include "chan.h"

#include <core/client.h>
#include <core/handle.h>
#include <core/notify.h>
#include <engine/dma.h>

#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
@@ -73,184 +71,6 @@ nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
	return NULL;
}

static int
nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
		     struct nvkm_notify *notify)
{
	if (size == 0) {
		notify->size = 0;
		notify->types = 1;
		notify->index = 0;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_event_func = {
	.ctor = nvkm_fifo_event_ctor,
};

int
nvkm_fifo_channel_create_(struct nvkm_object *parent,
			  struct nvkm_object *engine,
			  struct nvkm_oclass *oclass,
			  int bar, u32 addr, u32 size, u64 pushbuf,
			  u64 engmask, int len, void **ptr)
{
	struct nvkm_client *client = nvkm_client(parent);
	struct nvkm_dmaobj *dmaobj;
	struct nvkm_fifo *fifo = (void *)engine;
	struct nvkm_fifo_base *base = (void *)parent;
	struct nvkm_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_dma *dma = device->dma;
	unsigned long flags;
	int ret;

	/* create base object class */
	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
				  engmask, len, ptr);
	chan = *ptr;
	if (ret)
		return ret;

	/* validate dma object representing push buffer */
	if (pushbuf) {
		dmaobj = nvkm_dma_search(dma, client, pushbuf);
		if (!dmaobj)
			return -ENOENT;

		ret = dmaobj->func->bind(dmaobj, &base->gpuobj, 16,
					 &chan->pushgpu);
		if (ret)
			return ret;
	}

	/* find a free fifo channel */
	spin_lock_irqsave(&fifo->lock, flags);
	for (chan->chid = fifo->min; chan->chid < fifo->max; chan->chid++) {
		if (!fifo->channel[chan->chid]) {
			fifo->channel[chan->chid] = nv_object(chan);
			break;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->chid == fifo->max) {
		nvkm_error(subdev, "no free channels\n");
		return -ENOSPC;
	}

	chan->addr = nv_device_resource_start(device, bar) +
		     addr + size * chan->chid;
	chan->size = size;
	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
	return 0;
}

void
nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
{
	struct nvkm_fifo *fifo = (void *)nv_object(chan)->engine;
	unsigned long flags;

	if (chan->user)
		iounmap(chan->user);

	spin_lock_irqsave(&fifo->lock, flags);
	fifo->channel[chan->chid] = NULL;
	spin_unlock_irqrestore(&fifo->lock, flags);

	nvkm_gpuobj_del(&chan->pushgpu);
	nvkm_namedb_destroy(&chan->namedb);
}

void
_nvkm_fifo_channel_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	nvkm_fifo_channel_destroy(chan);
}

int
_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

u32
_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (WARN_ON_ONCE(chan->user == NULL))
			return 0;
	}
	return ioread32_native(chan->user + addr);
}

void
_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (WARN_ON_ONCE(chan->user == NULL))
			return;
	}
	iowrite32_native(data, chan->user + addr);
}

int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	union {
		struct nvif_notify_uevent_req none;
	} *req = data;
	int ret;

	if (nvif_unvers(req->none)) {
		notify->size = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = 0;
	}

	return ret;
}

void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
	struct nvif_notify_uevent_rep rep = {
	};
	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
}

int
_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
			struct nvkm_event **event)
{
	struct nvkm_fifo *fifo = (void *)object->engine;
	switch (type) {
	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
		if (nv_mclass(object) >= G82_CHANNEL_DMA) {
			*event = &fifo->uevent;
			return 0;
		}
		break;
	default:
		break;
	}
	return -EINVAL;
}

static int
nvkm_fifo_chid(struct nvkm_fifo *fifo, struct nvkm_object *object)
{
@@ -280,6 +100,50 @@ nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
	return nvkm_client_name(chan);
}

static int
nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
		     struct nvkm_notify *notify)
{
	if (size == 0) {
		notify->size = 0;
		notify->types = 1;
		notify->index = 0;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_event_func = {
	.ctor = nvkm_fifo_event_ctor,
};

int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	union {
		struct nvif_notify_uevent_req none;
	} *req = data;
	int ret;

	if (nvif_unvers(req->none)) {
		notify->size = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = 0;
	}

	return ret;
}

void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
	struct nvif_notify_uevent_rep rep = {
	};
	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
}

void
nvkm_fifo_destroy(struct nvkm_fifo *fifo)
{
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c (new file, 162 lines)
@@ -0,0 +1,162 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"

#include <core/client.h>
#include <engine/dma.h>

#include <nvif/class.h>

int
_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
			struct nvkm_event **event)
{
	struct nvkm_fifo *fifo = (void *)object->engine;
	switch (type) {
	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
		if (nv_mclass(object) >= G82_CHANNEL_DMA) {
			*event = &fifo->uevent;
			return 0;
		}
		break;
	default:
		break;
	}
	return -EINVAL;
}

int
_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

u32
_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (WARN_ON_ONCE(chan->user == NULL))
			return 0;
	}
	return ioread32_native(chan->user + addr);
}

void
_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (WARN_ON_ONCE(chan->user == NULL))
			return;
	}
	iowrite32_native(data, chan->user + addr);
}

void
nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
{
	struct nvkm_fifo *fifo = (void *)nv_object(chan)->engine;
	unsigned long flags;

	if (chan->user)
		iounmap(chan->user);

	spin_lock_irqsave(&fifo->lock, flags);
	fifo->channel[chan->chid] = NULL;
	spin_unlock_irqrestore(&fifo->lock, flags);

	nvkm_gpuobj_del(&chan->pushgpu);
	nvkm_namedb_destroy(&chan->namedb);
}

void
_nvkm_fifo_channel_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	nvkm_fifo_channel_destroy(chan);
}

int
nvkm_fifo_channel_create_(struct nvkm_object *parent,
			  struct nvkm_object *engine,
			  struct nvkm_oclass *oclass,
			  int bar, u32 addr, u32 size, u64 pushbuf,
			  u64 engmask, int len, void **ptr)
{
	struct nvkm_client *client = nvkm_client(parent);
	struct nvkm_fifo *fifo = (void *)engine;
	struct nvkm_fifo_base *base = (void *)parent;
	struct nvkm_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_dmaobj *dmaobj;
	unsigned long flags;
	int ret;

	/* create base object class */
	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
				  engmask, len, ptr);
	chan = *ptr;
	if (ret)
		return ret;

	/* validate dma object representing push buffer */
	if (pushbuf) {
		dmaobj = nvkm_dma_search(device->dma, client, pushbuf);
		if (!dmaobj)
			return -ENOENT;

		ret = nvkm_object_bind(&dmaobj->object, &base->gpuobj, 16,
				       &chan->pushgpu);
		if (ret)
			return ret;
	}

	/* find a free fifo channel */
	spin_lock_irqsave(&fifo->lock, flags);
	for (chan->chid = fifo->min; chan->chid < fifo->max; chan->chid++) {
		if (!fifo->channel[chan->chid]) {
			fifo->channel[chan->chid] = nv_object(chan);
			break;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->chid == fifo->max) {
		nvkm_error(subdev, "no free channels\n");
		return -ENOSPC;
	}

	chan->addr = nv_device_resource_start(device, bar) +
		     addr + size * chan->chid;
	chan->size = size;
	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
	return 0;
}
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h (new file, 28 lines)
@@ -0,0 +1,28 @@
#ifndef __NVKM_FIFO_CHAN_H__
#define __NVKM_FIFO_CHAN_H__
#include "priv.h"

#define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
	nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
				  (m), sizeof(**d), (void **)d)
#define nvkm_fifo_channel_init(p) \
	nvkm_namedb_init(&(p)->namedb)
#define nvkm_fifo_channel_fini(p,s) \
	nvkm_namedb_fini(&(p)->namedb, (s))

int nvkm_fifo_channel_create_(struct nvkm_object *,
			      struct nvkm_object *,
			      struct nvkm_oclass *,
			      int bar, u32 addr, u32 size, u64 push,
			      u64 engmask, int len, void **);
void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *);

#define _nvkm_fifo_channel_init _nvkm_namedb_init
#define _nvkm_fifo_channel_fini _nvkm_namedb_fini

void _nvkm_fifo_channel_dtor(struct nvkm_object *);
int _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *);
u32 _nvkm_fifo_channel_rd32(struct nvkm_object *, u64);
void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32);
int _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
#endif
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c (new file, 231 lines)
@@ -0,0 +1,231 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "channv50.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
int
|
||||
g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)parent->engine;
|
||||
struct nv50_fifo_base *base = (void *)parent->parent;
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 addr, save, engn;
|
||||
bool done;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
|
||||
case NVDEV_ENGINE_VP :
|
||||
case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
|
||||
case NVDEV_ENGINE_MSPPP :
|
||||
case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
|
||||
case NVDEV_ENGINE_BSP :
|
||||
case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
|
||||
case NVDEV_ENGINE_CIPHER:
|
||||
case NVDEV_ENGINE_SEC : engn = 4; addr = 0x00a0; break;
|
||||
case NVDEV_ENGINE_CE0 : engn = 2; addr = 0x00c0; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
|
||||
nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
|
||||
done = nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
|
||||
break;
|
||||
) >= 0;
|
||||
nvkm_wr32(device, 0x002520, save);
|
||||
if (!done) {
|
||||
nvkm_error(subdev, "channel %d [%s] unload timeout\n",
|
||||
chan->base.chid, nvkm_client_name(chan));
|
||||
if (suspend)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
nvkm_kmap(base->eng);
|
||||
nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
|
||||
nvkm_done(base->eng);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo_base *base = (void *)parent->parent;
|
||||
struct nvkm_gpuobj *ectx = (void *)object;
|
||||
u64 limit = ectx->addr + ectx->size - 1;
|
||||
u64 start = ectx->addr;
|
||||
u32 addr;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0020; break;
|
||||
case NVDEV_ENGINE_VP :
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
|
||||
case NVDEV_ENGINE_MSPPP :
|
||||
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
|
||||
case NVDEV_ENGINE_BSP :
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
|
||||
case NVDEV_ENGINE_CIPHER:
|
||||
case NVDEV_ENGINE_SEC : addr = 0x00a0; break;
|
||||
case NVDEV_ENGINE_CE0 : addr = 0x00c0; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
nvkm_kmap(base->eng);
|
||||
nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
|
||||
nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
|
||||
nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
|
||||
nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
|
||||
upper_32_bits(start));
|
||||
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
|
||||
nvkm_done(base->eng);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
g84_fifo_object_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
u32 context;
|
||||
|
||||
if (nv_iclass(object, NV_GPUOBJ_CLASS))
|
||||
context = nv_gpuobj(object)->node->offset >> 4;
|
||||
else
|
||||
context = 0x00000004; /* just non-zero */
|
||||
|
||||
if (object->engine) {
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_DMAOBJ:
|
||||
case NVDEV_ENGINE_SW : context |= 0x00000000; break;
|
||||
case NVDEV_ENGINE_GR : context |= 0x00100000; break;
|
||||
case NVDEV_ENGINE_MPEG :
|
||||
case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
|
||||
case NVDEV_ENGINE_ME :
|
||||
case NVDEV_ENGINE_CE0 : context |= 0x00300000; break;
|
||||
case NVDEV_ENGINE_VP :
|
||||
case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
|
||||
case NVDEV_ENGINE_CIPHER:
|
||||
case NVDEV_ENGINE_SEC :
|
||||
case NVDEV_ENGINE_VIC : context |= 0x00500000; break;
|
||||
case NVDEV_ENGINE_BSP :
|
||||
case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return nvkm_ramht_insert(chan->ramht, NULL, 0, 0, handle, context);
|
||||
}
|
||||
|
||||
int
|
||||
g84_fifo_chan_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)object->engine;
|
||||
struct nv50_fifo_base *base = (void *)object->parent;
|
||||
struct nv50_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_gpuobj *ramfc = base->ramfc;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_channel_init(&chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
|
||||
nv50_fifo_runlist_update(fifo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = nv_engine(engine)->subdev.device;
|
||||
struct nv50_fifo_base *base;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
|
||||
0x1000, NVOBJ_FLAG_HEAP, &base);
|
||||
*pobject = nv_object(base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x0200, 0, true, &base->base.gpuobj,
|
||||
&base->eng);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
|
||||
&base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, &base->base.gpuobj,
|
||||
&base->cache);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, &base->base.gpuobj,
|
||||
&base->ramfc);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
g84_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0x84),
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = g84_fifo_context_ctor,
|
||||
.dtor = nv50_fifo_context_dtor,
|
||||
.init = _nvkm_fifo_context_init,
|
||||
.fini = _nvkm_fifo_context_fini,
|
||||
.rd32 = _nvkm_fifo_context_rd32,
|
||||
.wr32 = _nvkm_fifo_context_wr32,
|
||||
},
|
||||
};
|
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h (new file, 23 lines)
@@ -0,0 +1,23 @@
#ifndef __GF100_FIFO_CHAN_H__
#define __GF100_FIFO_CHAN_H__
#include "chan.h"
#include "gf100.h"

struct gf100_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gf100_fifo_chan {
	struct nvkm_fifo_chan base;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

extern struct nvkm_oclass gf100_fifo_cclass;
extern struct nvkm_oclass gf100_fifo_sclass[];
#endif
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h (new file, 27 lines)
@@ -0,0 +1,27 @@
#ifndef __GK104_FIFO_CHAN_H__
#define __GK104_FIFO_CHAN_H__
#include "chan.h"
#include "gk104.h"

struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 engine;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

extern struct nvkm_oclass gk104_fifo_cclass;
extern struct nvkm_oclass gk104_fifo_sclass[];
extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs;

extern struct nvkm_oclass gm204_fifo_sclass[];
#endif
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h (new file, 24 lines)
@@ -0,0 +1,24 @@
#ifndef __NV04_FIFO_CHAN_H__
#define __NV04_FIFO_CHAN_H__
#include "chan.h"
#include "nv04.h"

struct nv04_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 subc[8];
	u32 ramfc;
};

int nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
void nv04_fifo_object_detach(struct nvkm_object *, int);

void nv04_fifo_chan_dtor(struct nvkm_object *);
int nv04_fifo_chan_init(struct nvkm_object *);
int nv04_fifo_chan_fini(struct nvkm_object *, bool suspend);

extern struct nvkm_oclass nv04_fifo_cclass;
extern struct nvkm_oclass nv04_fifo_sclass[];
extern struct nvkm_oclass nv10_fifo_sclass[];
extern struct nvkm_oclass nv17_fifo_sclass[];
extern struct nvkm_oclass nv40_fifo_sclass[];
#endif
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c (new file, 259 lines)
@@ -0,0 +1,259 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "channv50.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
int
|
||||
nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)parent->engine;
|
||||
struct nv50_fifo_base *base = (void *)parent->parent;
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 addr, me;
|
||||
int ret = 0;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0000; break;
|
||||
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* HW bug workaround:
|
||||
*
|
||||
* PFIFO will hang forever if the connected engines don't report
|
||||
* that they've processed the context switch request.
|
||||
*
|
||||
* In order for the kickoff to work, we need to ensure all the
|
||||
* connected engines are in a state where they can answer.
|
||||
*
|
||||
* Newer chipsets don't seem to suffer from this issue, and well,
|
||||
* there's also a "ignore these engines" bitmask reg we can use
|
||||
* if we hit the issue there..
|
||||
*/
|
||||
me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
|
||||
|
||||
/* do the kickoff... */
|
||||
nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "channel %d [%s] unload timeout\n",
|
||||
chan->base.chid, nvkm_client_name(chan));
|
||||
if (suspend)
|
||||
ret = -EBUSY;
|
||||
}
|
||||
nvkm_wr32(device, 0x00b860, me);
|
||||
|
||||
if (ret == 0) {
|
||||
nvkm_kmap(base->eng);
|
||||
nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
|
||||
nvkm_done(base->eng);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo_base *base = (void *)parent->parent;
|
||||
struct nvkm_gpuobj *ectx = (void *)object;
|
||||
u64 limit = ectx->addr + ectx->size - 1;
|
||||
u64 start = ectx->addr;
|
||||
u32 addr;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0000; break;
|
||||
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
|
||||
nvkm_kmap(base->eng);
|
||||
nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
|
||||
nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
|
||||
nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
|
||||
nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
|
||||
upper_32_bits(start));
|
||||
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
|
||||
nvkm_done(base->eng);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
|
||||
{
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
nvkm_ramht_remove(chan->ramht, cookie);
|
||||
}
|
||||
|
||||
int
|
||||
nv50_fifo_object_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
u32 context;
|
||||
|
||||
if (nv_iclass(object, NV_GPUOBJ_CLASS))
|
||||
context = nv_gpuobj(object)->node->offset >> 4;
|
||||
else
|
||||
context = 0x00000004; /* just non-zero */
|
||||
|
||||
if (object->engine) {
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_DMAOBJ:
|
||||
case NVDEV_ENGINE_SW : context |= 0x00000000; break;
|
||||
case NVDEV_ENGINE_GR : context |= 0x00100000; break;
|
||||
case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return nvkm_ramht_insert(chan->ramht, NULL, 0, 0, handle, context);
|
||||
}
|
||||
|
||||
int
|
||||
nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)object->engine;
|
||||
struct nv50_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
|
||||
/* remove channel from runlist, fifo will unload context */
|
||||
nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
|
||||
nv50_fifo_runlist_update(fifo);
|
||||
nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
|
||||
|
||||
return nvkm_fifo_channel_fini(&chan->base, suspend);
|
||||
}
|
||||
|
||||
int
|
||||
nv50_fifo_chan_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)object->engine;
|
||||
struct nv50_fifo_base *base = (void *)object->parent;
|
||||
struct nv50_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_gpuobj *ramfc = base->ramfc;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_channel_init(&chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
|
||||
nv50_fifo_runlist_update(fifo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv50_fifo_chan_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo_chan *chan = (void *)object;
|
||||
nvkm_ramht_del(&chan->ramht);
|
||||
nvkm_fifo_channel_destroy(&chan->base);
|
||||
}
|
||||
|
||||
void
|
||||
nv50_fifo_context_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo_base *base = (void *)object;
|
||||
nvkm_vm_ref(NULL, &base->vm, base->pgd);
|
||||
nvkm_gpuobj_del(&base->pgd);
|
||||
nvkm_gpuobj_del(&base->eng);
|
||||
nvkm_gpuobj_del(&base->ramfc);
|
||||
nvkm_gpuobj_del(&base->cache);
|
||||
nvkm_fifo_context_destroy(&base->base);
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = nv_engine(engine)->subdev.device;
|
||||
struct nv50_fifo_base *base;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
|
||||
0x1000, NVOBJ_FLAG_HEAP, &base);
|
||||
*pobject = nv_object(base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, &base->base.gpuobj,
|
||||
&base->ramfc);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x1200, 0, true, &base->base.gpuobj,
|
||||
&base->eng);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
|
||||
&base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
nv50_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0x50),
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = nv50_fifo_context_ctor,
|
||||
.dtor = nv50_fifo_context_dtor,
|
||||
.init = _nvkm_fifo_context_init,
|
||||
.fini = _nvkm_fifo_context_fini,
|
||||
.rd32 = _nvkm_fifo_context_rd32,
|
||||
.wr32 = _nvkm_fifo_context_wr32,
|
||||
},
|
||||
};
|
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h (new file, 42 lines)
@@ -0,0 +1,42 @@
#ifndef __NV50_FIFO_CHAN_H__
#define __NV50_FIFO_CHAN_H__
#include "chan.h"
#include "nv50.h"

struct nv50_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *ramfc;
	struct nvkm_gpuobj *cache;
	struct nvkm_gpuobj *eng;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct nv50_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 subc[8];
	struct nvkm_ramht *ramht;
};

extern struct nvkm_oclass nv50_fifo_cclass;
extern struct nvkm_oclass nv50_fifo_sclass[];
void nv50_fifo_context_dtor(struct nvkm_object *);
void nv50_fifo_chan_dtor(struct nvkm_object *);
int nv50_fifo_chan_init(struct nvkm_object *);
int nv50_fifo_chan_fini(struct nvkm_object *, bool);
int nv50_fifo_context_attach(struct nvkm_object *, struct nvkm_object *);
int nv50_fifo_context_detach(struct nvkm_object *, bool,
			     struct nvkm_object *);
int nv50_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
void nv50_fifo_object_detach(struct nvkm_object *, int);
extern struct nvkm_ofuncs nv50_fifo_ofuncs_ind;

extern struct nvkm_oclass g84_fifo_cclass;
extern struct nvkm_oclass g84_fifo_sclass[];
int g84_fifo_chan_init(struct nvkm_object *);
int g84_fifo_context_attach(struct nvkm_object *, struct nvkm_object *);
int g84_fifo_context_detach(struct nvkm_object *, bool,
			    struct nvkm_object *);
int g84_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
extern struct nvkm_ofuncs g84_fifo_ofuncs_ind;
#endif
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "channv50.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/ramht.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static int
|
||||
g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv50_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nvkm_device *device = parent->engine->subdev.device;
|
||||
struct nv50_fifo_base *base = (void *)parent;
|
||||
struct nv50_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d vm %llx "
|
||||
"pushbuf %llx offset %016llx\n",
|
||||
args->v0.version, args->v0.vm, args->v0.pushbuf,
|
||||
args->v0.offset);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x2000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG) |
|
||||
(1ULL << NVDEV_ENGINE_ME) |
|
||||
(1ULL << NVDEV_ENGINE_VP) |
|
||||
(1ULL << NVDEV_ENGINE_CIPHER) |
|
||||
(1ULL << NVDEV_ENGINE_SEC) |
|
||||
(1ULL << NVDEV_ENGINE_BSP) |
|
||||
(1ULL << NVDEV_ENGINE_MSVLD) |
|
||||
(1ULL << NVDEV_ENGINE_MSPDEC) |
|
||||
(1ULL << NVDEV_ENGINE_MSPPP) |
|
||||
(1ULL << NVDEV_ENGINE_CE0) |
|
||||
(1ULL << NVDEV_ENGINE_VIC), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
|
||||
&chan->ramht);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_parent(chan)->context_attach = g84_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = g84_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = g84_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
|
||||
|
||||
nvkm_kmap(base->ramfc);
|
||||
nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
|
||||
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
|
||||
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
|
||||
nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
|
||||
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
|
||||
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
|
||||
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
|
||||
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
|
||||
(4 << 24) /* SEARCH_FULL */ |
|
||||
(chan->ramht->gpuobj->node->offset >> 4));
|
||||
nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
|
||||
nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
|
||||
nvkm_done(base->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
g84_fifo_ofuncs_dma = {
|
||||
.ctor = g84_fifo_chan_ctor_dma,
|
||||
.dtor = nv50_fifo_chan_dtor,
|
||||
.init = g84_fifo_chan_init,
|
||||
.fini = nv50_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
struct nvkm_oclass
|
||||
g84_fifo_sclass[] = {
|
||||
{ G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
|
||||
{ G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
|
||||
{}
|
||||
};
|
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c (new file, 282 lines)
@@ -0,0 +1,282 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "channv04.h"
|
||||
#include "regsnv04.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/instmem.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
int
|
||||
nv04_fifo_context_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
mutex_lock(&nv_subdev(fifo)->mutex);
|
||||
nvkm_ramht_remove(imem->ramht, cookie);
|
||||
mutex_unlock(&nv_subdev(fifo)->mutex);
|
||||
}
|
||||
|
||||
int
|
||||
nv04_fifo_object_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
u32 context, chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
if (nv_iclass(object, NV_GPUOBJ_CLASS))
|
||||
context = nv_gpuobj(object)->addr >> 4;
|
||||
else
|
||||
context = 0x00000004; /* just non-zero */
|
||||
|
||||
if (object->engine) {
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_DMAOBJ:
|
||||
case NVDEV_ENGINE_SW:
|
||||
context |= 0x00000000;
|
||||
break;
|
||||
case NVDEV_ENGINE_GR:
|
||||
context |= 0x00010000;
|
||||
break;
|
||||
case NVDEV_ENGINE_MPEG:
|
||||
context |= 0x00020000;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
context |= 0x80000000; /* valid */
|
||||
context |= chid << 24;
|
||||
|
||||
mutex_lock(&nv_subdev(fifo)->mutex);
|
||||
ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context);
|
||||
mutex_unlock(&nv_subdev(fifo)->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)object->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
struct nvkm_memory *fctx = device->imem->ramfc;
|
||||
struct ramfc_desc *c;
|
||||
unsigned long flags;
|
||||
u32 data = chan->ramfc;
|
||||
u32 chid;
|
||||
|
||||
/* prevent fifo context switches */
|
||||
spin_lock_irqsave(&fifo->base.lock, flags);
|
||||
nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
|
||||
|
||||
/* if this channel is active, replace it with a null context */
|
||||
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
|
||||
if (chid == chan->base.chid) {
|
||||
nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
|
||||
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
|
||||
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
|
||||
|
||||
c = fifo->ramfc_desc;
|
||||
do {
|
||||
u32 rm = ((1ULL << c->bits) - 1) << c->regs;
|
||||
u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
|
||||
u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
|
||||
u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
|
||||
nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
|
||||
} while ((++c)->bits);
|
||||
|
||||
c = fifo->ramfc_desc;
|
||||
do {
|
||||
nvkm_wr32(device, c->regp, 0x00000000);
|
||||
} while ((++c)->bits);
|
||||
|
||||
nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
|
||||
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
|
||||
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
|
||||
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
|
||||
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
|
||||
}
|
||||
|
||||
/* restore normal operation, after disabling dma mode */
|
||||
nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
|
||||
nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
|
||||
spin_unlock_irqrestore(&fifo->base.lock, flags);
|
||||
|
||||
return nvkm_fifo_channel_fini(&chan->base, suspend);
|
||||
}
|
||||
|
||||
int
|
||||
nv04_fifo_chan_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)object->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 mask = 1 << chan->base.chid;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_channel_init(&chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
spin_lock_irqsave(&fifo->base.lock, flags);
|
||||
nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
|
||||
spin_unlock_irqrestore(&fifo->base.lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv04_fifo_chan_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)object->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
struct ramfc_desc *c = fifo->ramfc_desc;
|
||||
|
||||
nvkm_kmap(imem->ramfc);
|
||||
do {
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
|
||||
} while ((++c)->bits);
|
||||
nvkm_done(imem->ramfc);
|
||||
|
||||
nvkm_fifo_channel_destroy(&chan->base);
|
||||
}
|
||||
|
||||
static int
|
||||
nv04_fifo_chan_ctor(struct nvkm_object *parent,
|
||||
struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv03_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nv04_fifo *fifo = (void *)engine;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
struct nv04_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
|
||||
"offset %08x\n", args->v0.version,
|
||||
args->v0.pushbuf, args->v0.offset);
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
|
||||
0x10000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
|
||||
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
|
||||
chan->ramfc = chan->base.chid * 32;
|
||||
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
|
||||
#ifdef __BIG_ENDIAN
|
||||
NV_PFIFO_CACHE1_BIG_ENDIAN |
|
||||
#endif
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
|
||||
nvkm_done(imem->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
nv04_fifo_ofuncs = {
|
||||
.ctor = nv04_fifo_chan_ctor,
|
||||
.dtor = nv04_fifo_chan_dtor,
|
||||
.init = nv04_fifo_chan_init,
|
||||
.fini = nv04_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
struct nvkm_oclass
|
||||
nv04_fifo_sclass[] = {
|
||||
{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
|
||||
{}
|
||||
};
|
||||
|
||||
int
|
||||
nv04_fifo_context_ctor(struct nvkm_object *parent,
|
||||
struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nv04_fifo_base *base;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
|
||||
0x1000, NVOBJ_FLAG_HEAP, &base);
|
||||
*pobject = nv_object(base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
nv04_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0x04),
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = nv04_fifo_context_ctor,
|
||||
.dtor = _nvkm_fifo_context_dtor,
|
||||
.init = _nvkm_fifo_context_init,
|
||||
.fini = _nvkm_fifo_context_fini,
|
||||
.rd32 = _nvkm_fifo_context_rd32,
|
||||
.wr32 = _nvkm_fifo_context_wr32,
|
||||
},
|
||||
};
|
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c (new file, 102 lines)
@@ -0,0 +1,102 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <subdev/instmem.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

static int
nv10_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	struct nv04_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_done(imem->ramfc);
	return 0;
}

static struct nvkm_ofuncs
nv10_fifo_ofuncs = {
	.ctor = nv10_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

struct nvkm_oclass
nv10_fifo_sclass[] = {
	{ NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
	{}
};
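
All of the constructors added by this patch funnel their ioctl payload through the same versioned-unpacking idiom seen above: a union of struct *_v0 layouts handed to nvif_unpack(). The following is a hedged, self-contained sketch of that idea in plain C; it does not use the real nvif machinery, and every name in it is invented for the example.

/* Stand-alone illustration of the versioned-argument pattern used by the
 * channel constructors in this patch: the payload is interpreted as the
 * newest layout the code understands, and anything else is rejected. */
#include <errno.h>
#include <stdint.h>
#include <string.h>

struct demo_channel_dma_v0 {
	uint8_t  version;		/* must be 0 for this layout */
	uint8_t  chid;			/* filled in on return */
	uint64_t pushbuf;
	uint64_t offset;
};

static int
demo_channel_dma_ctor(const void *data, uint32_t size,
		      struct demo_channel_dma_v0 *out)
{
	union {
		struct demo_channel_dma_v0 v0;
	} args;

	if (size < sizeof(args.v0))
		return -ENOSYS;		/* too small for any known layout */
	memcpy(&args.v0, data, sizeof(args.v0));
	if (args.v0.version != 0)
		return -ENOSYS;		/* unknown request version */

	*out = args.v0;
	out->chid = 0;			/* a real ctor would allocate an id */
	return 0;
}
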
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "channv04.h"
|
||||
#include "regsnv04.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <subdev/instmem.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static int
|
||||
nv17_fifo_chan_ctor(struct nvkm_object *parent,
|
||||
struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv03_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nv04_fifo *fifo = (void *)engine;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
struct nv04_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
|
||||
"offset %08x\n", args->v0.version,
|
||||
args->v0.pushbuf, args->v0.offset);
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
|
||||
0x10000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
|
||||
&chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
|
||||
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
|
||||
chan->ramfc = chan->base.chid * 64;
|
||||
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
|
||||
#ifdef __BIG_ENDIAN
|
||||
NV_PFIFO_CACHE1_BIG_ENDIAN |
|
||||
#endif
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
|
||||
nvkm_done(imem->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
nv17_fifo_ofuncs = {
|
||||
.ctor = nv17_fifo_chan_ctor,
|
||||
.dtor = nv04_fifo_chan_dtor,
|
||||
.init = nv04_fifo_chan_init,
|
||||
.fini = nv04_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
struct nvkm_oclass
|
||||
nv17_fifo_sclass[] = {
|
||||
{ NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
|
||||
{}
|
||||
};
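
One detail that is easy to miss across these otherwise similar files is the per-channel RAMFC stride: the nv10 constructor earlier uses 32-byte slots (chid * 32), the nv17 constructor above uses 64, and the nv40 constructor below uses 128. A small illustrative helper, not driver code, that captures just that offset computation with the strides transcribed from this patch:

/* Hypothetical helper: byte offset of a channel's RAMFC slot per chipset. */
#include <stdint.h>

enum demo_chipset { DEMO_NV10, DEMO_NV17, DEMO_NV40 };

static uint32_t
demo_ramfc_offset(enum demo_chipset chip, uint32_t chid)
{
	static const uint32_t stride[] = {
		[DEMO_NV10] = 32,
		[DEMO_NV17] = 64,
		[DEMO_NV40] = 128,
	};
	return chid * stride[chip];
}

/* e.g. channel 5 on an nv17-class board lands at 5 * 64 = 0x140. */
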
|
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c (new file, 225 lines)
@@ -0,0 +1,225 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "channv04.h"
|
||||
#include "regsnv04.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/instmem.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static int
|
||||
nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *engctx)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
struct nvkm_instmem *imem = device->imem;
|
||||
unsigned long flags;
|
||||
u32 reg, ctx;
|
||||
|
||||
switch (nv_engidx(engctx->engine)) {
|
||||
case NVDEV_ENGINE_SW:
|
||||
return 0;
|
||||
case NVDEV_ENGINE_GR:
|
||||
reg = 0x32e0;
|
||||
ctx = 0x38;
|
||||
break;
|
||||
case NVDEV_ENGINE_MPEG:
|
||||
reg = 0x330c;
|
||||
ctx = 0x54;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&fifo->base.lock, flags);
|
||||
nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
|
||||
|
||||
if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
|
||||
nvkm_wr32(device, reg, 0x00000000);
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
|
||||
nvkm_done(imem->ramfc);
|
||||
|
||||
nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
|
||||
spin_unlock_irqrestore(&fifo->base.lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
struct nvkm_instmem *imem = device->imem;
|
||||
unsigned long flags;
|
||||
u32 reg, ctx;
|
||||
|
||||
switch (nv_engidx(engctx->engine)) {
|
||||
case NVDEV_ENGINE_SW:
|
||||
return 0;
|
||||
case NVDEV_ENGINE_GR:
|
||||
reg = 0x32e0;
|
||||
ctx = 0x38;
|
||||
break;
|
||||
case NVDEV_ENGINE_MPEG:
|
||||
reg = 0x330c;
|
||||
ctx = 0x54;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&fifo->base.lock, flags);
|
||||
nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
|
||||
nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
|
||||
|
||||
if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
|
||||
nvkm_wr32(device, reg, nv_engctx(engctx)->addr);
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
|
||||
nvkm_done(imem->ramfc);
|
||||
|
||||
nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
|
||||
spin_unlock_irqrestore(&fifo->base.lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_fifo_object_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
u32 context, chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
if (nv_iclass(object, NV_GPUOBJ_CLASS))
|
||||
context = nv_gpuobj(object)->addr >> 4;
|
||||
else
|
||||
context = 0x00000004; /* just non-zero */
|
||||
|
||||
if (object->engine) {
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_DMAOBJ:
|
||||
case NVDEV_ENGINE_SW:
|
||||
context |= 0x00000000;
|
||||
break;
|
||||
case NVDEV_ENGINE_GR:
|
||||
context |= 0x00100000;
|
||||
break;
|
||||
case NVDEV_ENGINE_MPEG:
|
||||
context |= 0x00200000;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
context |= chid << 23;
|
||||
|
||||
mutex_lock(&nv_subdev(fifo)->mutex);
|
||||
ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context);
|
||||
mutex_unlock(&nv_subdev(fifo)->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv03_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nv04_fifo *fifo = (void *)engine;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
struct nv04_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
|
||||
"offset %08x\n", args->v0.version,
|
||||
args->v0.pushbuf, args->v0.offset);
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x1000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = nv40_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = nv40_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = nv40_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
|
||||
chan->ramfc = chan->base.chid * 128;
|
||||
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
|
||||
#ifdef __BIG_ENDIAN
|
||||
NV_PFIFO_CACHE1_BIG_ENDIAN |
|
||||
#endif
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
|
||||
nvkm_done(imem->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
nv40_fifo_ofuncs = {
|
||||
.ctor = nv40_fifo_chan_ctor,
|
||||
.dtor = nv04_fifo_chan_dtor,
|
||||
.init = nv04_fifo_chan_init,
|
||||
.fini = nv04_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
struct nvkm_oclass
|
||||
nv40_fifo_sclass[] = {
|
||||
{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
|
||||
{}
|
||||
};
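
nv40_fifo_object_attach() above packs everything the hash table needs into a single 32-bit context word: the object's instance address (or a non-zero placeholder), an engine selector, and the channel id shifted to bit 23. A stand-alone sketch of that packing, reusing only constants that appear in the function itself; the helper name is invented and this is not driver code:

/* Illustrative packing of the nv40-style RAMHT context word.  Engine codes
 * 0x00000000 (SW/DMAOBJ), 0x00100000 (GR) and 0x00200000 (MPEG), and the
 * chid << 23 shift, are taken directly from the patch above. */
#include <stdint.h>

static uint32_t
demo_nv40_ramht_context(uint32_t instance, uint32_t engine_code, uint32_t chid)
{
	uint32_t context = instance ? (instance >> 4) : 0x00000004;

	context |= engine_code;		/* which engine decodes the object */
	context |= chid << 23;		/* owning channel */
	return context;
}
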
|
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "channv50.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/ramht.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static int
|
||||
nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv50_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nvkm_device *device = parent->engine->subdev.device;
|
||||
struct nv50_fifo_base *base = (void *)parent;
|
||||
struct nv50_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d vm %llx "
|
||||
"pushbuf %llx offset %016llx\n",
|
||||
args->v0.version, args->v0.vm, args->v0.pushbuf,
|
||||
args->v0.offset);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x2000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
|
||||
|
||||
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
|
||||
&chan->ramht);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_kmap(base->ramfc);
|
||||
nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
|
||||
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
|
||||
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
|
||||
nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
|
||||
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
|
||||
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
|
||||
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
|
||||
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
|
||||
(4 << 24) /* SEARCH_FULL */ |
|
||||
(chan->ramht->gpuobj->node->offset >> 4));
|
||||
nvkm_done(base->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
nv50_fifo_ofuncs_dma = {
|
||||
.ctor = nv50_fifo_chan_ctor_dma,
|
||||
.dtor = nv50_fifo_chan_dtor,
|
||||
.init = nv50_fifo_chan_init,
|
||||
.fini = nv50_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
struct nvkm_oclass
|
||||
nv50_fifo_sclass[] = {
|
||||
{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
|
||||
{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
|
||||
{}
|
||||
};
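
RAMFC offset 0x80 in the nv50 constructor above is a packed descriptor of the channel's hash table: a size code in the top bits, a search mode (4, labelled SEARCH_FULL in the patch) below it, and the table's offset within instance memory in 16-byte units. A hedged sketch of the packing; the field meanings are inferred from the expression in the patch, and the helper itself is hypothetical:

/* Assemble the RAMFC+0x80 hash-table descriptor the way the nv50 DMA
 * constructor above does.  Inferred layout:
 *   [31:27] size code (ramht->bits - 9 in the driver)
 *   [26:24] search mode, 4 == SEARCH_FULL
 *   [23:0]  hash-table offset within instmem, in 16-byte units */
#include <stdint.h>

static uint32_t
demo_nv50_ramht_desc(uint32_t ramht_bits, uint32_t ramht_offset)
{
	return ((ramht_bits - 9) << 27) |
	       (4 << 24) |		/* SEARCH_FULL */
	       (ramht_offset >> 4);
}
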
|
@@ -22,425 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv50.h"
|
||||
#include "nv04.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/engctx.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/mmu.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO channel objects
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo_base *base = (void *)parent->parent;
|
||||
struct nvkm_gpuobj *ectx = (void *)object;
|
||||
u64 limit = ectx->addr + ectx->size - 1;
|
||||
u64 start = ectx->addr;
|
||||
u32 addr;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0020; break;
|
||||
case NVDEV_ENGINE_VP :
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
|
||||
case NVDEV_ENGINE_MSPPP :
|
||||
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
|
||||
case NVDEV_ENGINE_BSP :
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
|
||||
case NVDEV_ENGINE_CIPHER:
|
||||
case NVDEV_ENGINE_SEC : addr = 0x00a0; break;
|
||||
case NVDEV_ENGINE_CE0 : addr = 0x00c0; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
nvkm_kmap(base->eng);
|
||||
nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
|
||||
nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
|
||||
nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
|
||||
nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
|
||||
upper_32_bits(start));
|
||||
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
|
||||
nvkm_done(base->eng);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)parent->engine;
|
||||
struct nv50_fifo_base *base = (void *)parent->parent;
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 addr, save, engn;
|
||||
bool done;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
|
||||
case NVDEV_ENGINE_VP :
|
||||
case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
|
||||
case NVDEV_ENGINE_MSPPP :
|
||||
case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
|
||||
case NVDEV_ENGINE_BSP :
|
||||
case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
|
||||
case NVDEV_ENGINE_CIPHER:
|
||||
case NVDEV_ENGINE_SEC : engn = 4; addr = 0x00a0; break;
|
||||
case NVDEV_ENGINE_CE0 : engn = 2; addr = 0x00c0; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
|
||||
nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
|
||||
done = nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
|
||||
break;
|
||||
) >= 0;
|
||||
nvkm_wr32(device, 0x002520, save);
|
||||
if (!done) {
|
||||
nvkm_error(subdev, "channel %d [%s] unload timeout\n",
|
||||
chan->base.chid, nvkm_client_name(chan));
|
||||
if (suspend)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
nvkm_kmap(base->eng);
|
||||
nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
|
||||
nvkm_done(base->eng);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
g84_fifo_object_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
u32 context;
|
||||
|
||||
if (nv_iclass(object, NV_GPUOBJ_CLASS))
|
||||
context = nv_gpuobj(object)->node->offset >> 4;
|
||||
else
|
||||
context = 0x00000004; /* just non-zero */
|
||||
|
||||
if (object->engine) {
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_DMAOBJ:
|
||||
case NVDEV_ENGINE_SW : context |= 0x00000000; break;
|
||||
case NVDEV_ENGINE_GR : context |= 0x00100000; break;
|
||||
case NVDEV_ENGINE_MPEG :
|
||||
case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
|
||||
case NVDEV_ENGINE_ME :
|
||||
case NVDEV_ENGINE_CE0 : context |= 0x00300000; break;
|
||||
case NVDEV_ENGINE_VP :
|
||||
case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
|
||||
case NVDEV_ENGINE_CIPHER:
|
||||
case NVDEV_ENGINE_SEC :
|
||||
case NVDEV_ENGINE_VIC : context |= 0x00500000; break;
|
||||
case NVDEV_ENGINE_BSP :
|
||||
case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return nvkm_ramht_insert(chan->ramht, NULL, 0, 0, handle, context);
|
||||
}
|
||||
|
||||
static int
|
||||
g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv50_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nvkm_device *device = parent->engine->subdev.device;
|
||||
struct nv50_fifo_base *base = (void *)parent;
|
||||
struct nv50_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
|
||||
"offset %016llx\n", args->v0.version,
|
||||
args->v0.pushbuf, args->v0.offset);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x2000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG) |
|
||||
(1ULL << NVDEV_ENGINE_ME) |
|
||||
(1ULL << NVDEV_ENGINE_VP) |
|
||||
(1ULL << NVDEV_ENGINE_CIPHER) |
|
||||
(1ULL << NVDEV_ENGINE_SEC) |
|
||||
(1ULL << NVDEV_ENGINE_BSP) |
|
||||
(1ULL << NVDEV_ENGINE_MSVLD) |
|
||||
(1ULL << NVDEV_ENGINE_MSPDEC) |
|
||||
(1ULL << NVDEV_ENGINE_MSPPP) |
|
||||
(1ULL << NVDEV_ENGINE_CE0) |
|
||||
(1ULL << NVDEV_ENGINE_VIC), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
|
||||
&chan->ramht);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_parent(chan)->context_attach = g84_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = g84_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = g84_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
|
||||
|
||||
nvkm_kmap(base->ramfc);
|
||||
nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
|
||||
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
|
||||
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
|
||||
nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
|
||||
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
|
||||
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
|
||||
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
|
||||
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
|
||||
(4 << 24) /* SEARCH_FULL */ |
|
||||
(chan->ramht->gpuobj->node->offset >> 4));
|
||||
nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
|
||||
nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
|
||||
nvkm_done(base->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv50_channel_gpfifo_v0 v0;
|
||||
} *args = data;
|
||||
struct nvkm_device *device = parent->engine->subdev.device;
|
||||
struct nv50_fifo_base *base = (void *)parent;
|
||||
struct nv50_fifo_chan *chan;
|
||||
u64 ioffset, ilength;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %llx "
|
||||
"ioffset %016llx ilength %08x\n",
|
||||
args->v0.version, args->v0.pushbuf, args->v0.ioffset,
|
||||
args->v0.ilength);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x2000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG) |
|
||||
(1ULL << NVDEV_ENGINE_ME) |
|
||||
(1ULL << NVDEV_ENGINE_VP) |
|
||||
(1ULL << NVDEV_ENGINE_CIPHER) |
|
||||
(1ULL << NVDEV_ENGINE_SEC) |
|
||||
(1ULL << NVDEV_ENGINE_BSP) |
|
||||
(1ULL << NVDEV_ENGINE_MSVLD) |
|
||||
(1ULL << NVDEV_ENGINE_MSPDEC) |
|
||||
(1ULL << NVDEV_ENGINE_MSPPP) |
|
||||
(1ULL << NVDEV_ENGINE_CE0) |
|
||||
(1ULL << NVDEV_ENGINE_VIC), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
|
||||
&chan->ramht);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_parent(chan)->context_attach = g84_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = g84_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = g84_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
|
||||
|
||||
ioffset = args->v0.ioffset;
|
||||
ilength = order_base_2(args->v0.ilength / 8);
|
||||
|
||||
nvkm_kmap(base->ramfc);
|
||||
nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
|
||||
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
|
||||
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
|
||||
nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
|
||||
nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
|
||||
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
|
||||
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
|
||||
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
|
||||
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
|
||||
(4 << 24) /* SEARCH_FULL */ |
|
||||
(chan->ramht->gpuobj->node->offset >> 4));
|
||||
nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
|
||||
nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
|
||||
nvkm_done(base->ramfc);
|
||||
return 0;
|
||||
}
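
The GPFIFO ("ind") constructor above stores the ring size as a power-of-two exponent: the byte length requested by userspace is divided by 8 (each GPFIFO entry is eight bytes) and rounded up to a log2 value with order_base_2() before being packed next to the upper half of the ring address. A stand-alone sketch of that computation; the rounding helper is re-implemented here so the example compiles outside the kernel, where the driver itself uses order_base_2() from <linux/log2.h>:

/* Mirror of the ilength handling in the GPFIFO constructor above. */
#include <stdint.h>

/* Smallest n such that (1 << n) >= x, i.e. a minimal order_base_2(). */
static uint32_t
demo_order_base_2(uint64_t x)
{
	uint32_t n = 0;

	while ((1ULL << n) < x)
		n++;
	return n;
}

static uint32_t
demo_gpfifo_word_0x54(uint64_t ioffset, uint32_t ilength_bytes)
{
	uint32_t ilength = demo_order_base_2(ilength_bytes / 8);

	/* upper_32_bits(ioffset) | (ilength << 16), as in the patch */
	return (uint32_t)(ioffset >> 32) | (ilength << 16);
}

/* e.g. a 0x1000-byte ring holds 512 entries, so ilength == 9. */
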
|
||||
|
||||
static int
|
||||
g84_fifo_chan_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)object->engine;
|
||||
struct nv50_fifo_base *base = (void *)object->parent;
|
||||
struct nv50_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_gpuobj *ramfc = base->ramfc;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_channel_init(&chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
|
||||
nv50_fifo_playlist_update(fifo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
g84_fifo_ofuncs_dma = {
|
||||
.ctor = g84_fifo_chan_ctor_dma,
|
||||
.dtor = nv50_fifo_chan_dtor,
|
||||
.init = g84_fifo_chan_init,
|
||||
.fini = nv50_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
g84_fifo_ofuncs_ind = {
|
||||
.ctor = g84_fifo_chan_ctor_ind,
|
||||
.dtor = nv50_fifo_chan_dtor,
|
||||
.init = g84_fifo_chan_init,
|
||||
.fini = nv50_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
static struct nvkm_oclass
|
||||
g84_fifo_sclass[] = {
|
||||
{ G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
|
||||
{ G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO context - basically just the instmem reserved for the channel
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = nv_engine(engine)->subdev.device;
|
||||
struct nv50_fifo_base *base;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
|
||||
0x1000, NVOBJ_FLAG_HEAP, &base);
|
||||
*pobject = nv_object(base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x0200, 0, true, &base->base.gpuobj,
|
||||
&base->eng);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
|
||||
&base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, &base->base.gpuobj,
|
||||
&base->cache);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, &base->base.gpuobj,
|
||||
&base->ramfc);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_oclass
|
||||
g84_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0x84),
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = g84_fifo_context_ctor,
|
||||
.dtor = nv50_fifo_context_dtor,
|
||||
.init = _nvkm_fifo_context_init,
|
||||
.fini = _nvkm_fifo_context_fini,
|
||||
.rd32 = _nvkm_fifo_context_rd32,
|
||||
.wr32 = _nvkm_fifo_context_wr32,
|
||||
},
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* PFIFO engine
|
||||
******************************************************************************/
|
||||
|
||||
static void
|
||||
g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
|
||||
}
|
||||
#include "channv50.h"
|
||||
|
||||
static void
|
||||
g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
|
||||
@@ -450,6 +32,14 @@ g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
|
||||
nvkm_mask(device, 0x002140, 0x40000000, 0x00000000);
|
||||
}
|
||||
|
||||
static void
|
||||
g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
|
||||
}
|
||||
|
||||
static const struct nvkm_event_func
|
||||
g84_fifo_uevent_func = {
|
||||
.ctor = nvkm_fifo_uevent_ctor,
|
||||
@@ -472,12 +62,12 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
return ret;
|
||||
|
||||
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
|
||||
false, &fifo->playlist[0]);
|
||||
false, &fifo->runlist[0]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
|
||||
false, &fifo->playlist[1]);
|
||||
false, &fifo->runlist[1]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@@ -21,61 +21,41 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include <engine/fifo.h>
|
||||
#include "gf100.h"
|
||||
#include "changf100.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/engctx.h>
|
||||
#include <core/enum.h>
|
||||
#include <core/handle.h>
|
||||
#include <subdev/bar.h>
|
||||
#include <subdev/fb.h>
|
||||
#include <subdev/mmu.h>
|
||||
#include <subdev/timer.h>
|
||||
#include <engine/sw.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/ioctl.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
struct gf100_fifo {
|
||||
struct nvkm_fifo base;
|
||||
|
||||
struct work_struct fault;
|
||||
u64 mask;
|
||||
|
||||
struct {
|
||||
struct nvkm_memory *mem[2];
|
||||
int active;
|
||||
wait_queue_head_t wait;
|
||||
} runlist;
|
||||
|
||||
struct {
|
||||
struct nvkm_memory *mem;
|
||||
struct nvkm_vma bar;
|
||||
} user;
|
||||
int spoon_nr;
|
||||
};
|
||||
|
||||
struct gf100_fifo_base {
|
||||
struct nvkm_fifo_base base;
|
||||
struct nvkm_gpuobj *pgd;
|
||||
struct nvkm_vm *vm;
|
||||
};
|
||||
|
||||
struct gf100_fifo_chan {
|
||||
struct nvkm_fifo_chan base;
|
||||
enum {
|
||||
STOPPED,
|
||||
RUNNING,
|
||||
KILLED
|
||||
} state;
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO channel objects
|
||||
******************************************************************************/
|
||||
|
||||
static void
|
||||
gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
|
||||
}
|
||||
|
||||
static void
|
||||
gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
|
||||
}
|
||||
|
||||
static const struct nvkm_event_func
|
||||
gf100_fifo_uevent_func = {
|
||||
.ctor = nvkm_fifo_uevent_ctor,
|
||||
.init = gf100_fifo_uevent_init,
|
||||
.fini = gf100_fifo_uevent_fini,
|
||||
};
|
||||
|
||||
void
|
||||
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
@@ -108,289 +88,6 @@ gf100_fifo_runlist_update(struct gf100_fifo *fifo)
|
||||
mutex_unlock(&nv_subdev(fifo)->mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_context_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct gf100_fifo_base *base = (void *)parent->parent;
|
||||
struct nvkm_gpuobj *engn = &base->base.gpuobj;
|
||||
struct nvkm_engctx *ectx = (void *)object;
|
||||
u32 addr;
|
||||
int ret;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0210; break;
|
||||
case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
|
||||
case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
|
||||
case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!ectx->vma.node) {
|
||||
ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
|
||||
NV_MEM_ACCESS_RW, &ectx->vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
}
|
||||
|
||||
nvkm_kmap(engn);
|
||||
nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
|
||||
nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
|
||||
nvkm_done(engn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct gf100_fifo *fifo = (void *)parent->engine;
|
||||
struct gf100_fifo_base *base = (void *)parent->parent;
|
||||
struct gf100_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_gpuobj *engn = &base->base.gpuobj;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 addr;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0210; break;
|
||||
case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
|
||||
case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
|
||||
case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nvkm_wr32(device, 0x002634, chan->base.chid);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, 0x002634) == chan->base.chid)
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
|
||||
chan->base.chid, nvkm_client_name(chan));
|
||||
if (suspend)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
nvkm_kmap(engn);
|
||||
nvkm_wo32(engn, addr + 0x00, 0x00000000);
|
||||
nvkm_wo32(engn, addr + 0x04, 0x00000000);
|
||||
nvkm_done(engn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct fermi_channel_gpfifo_v0 v0;
|
||||
} *args = data;
|
||||
struct gf100_fifo *fifo = (void *)engine;
|
||||
struct gf100_fifo_base *base = (void *)parent;
|
||||
struct gf100_fifo_chan *chan;
|
||||
struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
|
||||
u64 usermem, ioffset, ilength;
|
||||
int ret, i;
|
||||
|
||||
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel gpfifo vers %d "
|
||||
"ioffset %016llx ilength %08x\n",
|
||||
args->v0.version, args->v0.ioffset,
|
||||
args->v0.ilength);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
|
||||
fifo->user.bar.offset, 0x1000, 0,
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_CE0) |
|
||||
(1ULL << NVDEV_ENGINE_CE1) |
|
||||
(1ULL << NVDEV_ENGINE_MSVLD) |
|
||||
(1ULL << NVDEV_ENGINE_MSPDEC) |
|
||||
(1ULL << NVDEV_ENGINE_MSPPP), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = gf100_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = gf100_fifo_context_detach;
|
||||
|
||||
usermem = chan->base.chid * 0x1000;
|
||||
ioffset = args->v0.ioffset;
|
||||
ilength = order_base_2(args->v0.ilength / 8);
|
||||
|
||||
nvkm_kmap(fifo->user.mem);
|
||||
for (i = 0; i < 0x1000; i += 4)
|
||||
nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
|
||||
nvkm_done(fifo->user.mem);
|
||||
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
|
||||
|
||||
nvkm_kmap(ramfc);
|
||||
nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
|
||||
nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
|
||||
nvkm_wo32(ramfc, 0x10, 0x0000face);
|
||||
nvkm_wo32(ramfc, 0x30, 0xfffff902);
|
||||
nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
|
||||
nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
|
||||
nvkm_wo32(ramfc, 0x54, 0x00000002);
|
||||
nvkm_wo32(ramfc, 0x84, 0x20400000);
|
||||
nvkm_wo32(ramfc, 0x94, 0x30000001);
|
||||
nvkm_wo32(ramfc, 0x9c, 0x00000100);
|
||||
nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
|
||||
nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
|
||||
nvkm_wo32(ramfc, 0xac, 0x0000001f);
|
||||
nvkm_wo32(ramfc, 0xb8, 0xf8000000);
|
||||
nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
|
||||
nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
|
||||
nvkm_done(ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_chan_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
|
||||
struct gf100_fifo *fifo = (void *)object->engine;
|
||||
struct gf100_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_channel_init(&chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
|
||||
|
||||
if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
|
||||
nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
|
||||
gf100_fifo_runlist_update(fifo);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gf100_fifo_intr_engine(struct gf100_fifo *fifo);
|
||||
|
||||
static int
|
||||
gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
|
||||
{
|
||||
struct gf100_fifo *fifo = (void *)object->engine;
|
||||
struct gf100_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
|
||||
if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
|
||||
nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
|
||||
gf100_fifo_runlist_update(fifo);
|
||||
}
|
||||
|
||||
gf100_fifo_intr_engine(fifo);
|
||||
|
||||
nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
|
||||
return nvkm_fifo_channel_fini(&chan->base, suspend);
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
gf100_fifo_ofuncs = {
|
||||
.ctor = gf100_fifo_chan_ctor,
|
||||
.dtor = _nvkm_fifo_channel_dtor,
|
||||
.init = gf100_fifo_chan_init,
|
||||
.fini = gf100_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
static struct nvkm_oclass
|
||||
gf100_fifo_sclass[] = {
|
||||
{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO context - instmem heap and vm setup
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = nv_engine(engine)->subdev.device;
|
||||
struct gf100_fifo_base *base;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
|
||||
0x1000, NVOBJ_FLAG_ZERO_ALLOC |
|
||||
NVOBJ_FLAG_HEAP, &base);
|
||||
*pobject = nv_object(base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_kmap(&base->base.gpuobj);
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
|
||||
nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
|
||||
nvkm_done(&base->base.gpuobj);
|
||||
|
||||
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gf100_fifo_context_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct gf100_fifo_base *base = (void *)object;
|
||||
nvkm_vm_ref(NULL, &base->vm, base->pgd);
|
||||
nvkm_gpuobj_del(&base->pgd);
|
||||
nvkm_fifo_context_destroy(&base->base);
|
||||
}
|
||||
|
||||
static struct nvkm_oclass
|
||||
gf100_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0xc0),
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = gf100_fifo_context_ctor,
|
||||
.dtor = gf100_fifo_context_dtor,
|
||||
.init = _nvkm_fifo_context_init,
|
||||
.fini = _nvkm_fifo_context_fini,
|
||||
.rd32 = _nvkm_fifo_context_rd32,
|
||||
.wr32 = _nvkm_fifo_context_wr32,
|
||||
},
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* PFIFO engine
|
||||
******************************************************************************/
|
||||
|
||||
static inline int
|
||||
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
|
||||
{
|
||||
@@ -739,7 +436,7 @@ gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
void
|
||||
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
|
||||
{
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
@@ -825,28 +522,62 @@ gf100_fifo_intr(struct nvkm_subdev *subdev)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
|
||||
static int
|
||||
gf100_fifo_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
|
||||
struct gf100_fifo *fifo = (void *)object;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int ret, i;
|
||||
|
||||
ret = nvkm_fifo_init(&fifo->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_wr32(device, 0x000204, 0xffffffff);
|
||||
nvkm_wr32(device, 0x002204, 0xffffffff);
|
||||
|
||||
fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
|
||||
nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
|
||||
|
||||
/* assign engines to PBDMAs */
|
||||
if (fifo->spoon_nr >= 3) {
|
||||
nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
|
||||
nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
|
||||
nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
|
||||
nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
|
||||
nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
|
||||
nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
|
||||
}
|
||||
|
||||
/* PBDMA[n] */
|
||||
for (i = 0; i < fifo->spoon_nr; i++) {
|
||||
nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
|
||||
nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
|
||||
nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
|
||||
}
|
||||
|
||||
nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
|
||||
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
|
||||
|
||||
nvkm_wr32(device, 0x002100, 0xffffffff);
|
||||
nvkm_wr32(device, 0x002140, 0x7fffffff);
|
||||
nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
|
||||
gf100_fifo_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
|
||||
}
|
||||
struct gf100_fifo *fifo = (void *)object;
|
||||
|
||||
static const struct nvkm_event_func
|
||||
gf100_fifo_uevent_func = {
|
||||
.ctor = nvkm_fifo_uevent_ctor,
|
||||
.init = gf100_fifo_uevent_init,
|
||||
.fini = gf100_fifo_uevent_fini,
|
||||
};
|
||||
nvkm_vm_put(&fifo->user.bar);
|
||||
nvkm_memory_del(&fifo->user.mem);
|
||||
nvkm_memory_del(&fifo->runlist.mem[0]);
|
||||
nvkm_memory_del(&fifo->runlist.mem[1]);
|
||||
|
||||
nvkm_fifo_destroy(&fifo->base);
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
@@ -899,62 +630,6 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gf100_fifo_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct gf100_fifo *fifo = (void *)object;
|
||||
|
||||
nvkm_vm_put(&fifo->user.bar);
|
||||
nvkm_memory_del(&fifo->user.mem);
|
||||
nvkm_memory_del(&fifo->runlist.mem[0]);
|
||||
nvkm_memory_del(&fifo->runlist.mem[1]);
|
||||
|
||||
nvkm_fifo_destroy(&fifo->base);
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_init(struct nvkm_object *object)
|
||||
{
|
||||
struct gf100_fifo *fifo = (void *)object;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int ret, i;
|
||||
|
||||
ret = nvkm_fifo_init(&fifo->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_wr32(device, 0x000204, 0xffffffff);
|
||||
nvkm_wr32(device, 0x002204, 0xffffffff);
|
||||
|
||||
fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
|
||||
nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
|
||||
|
||||
/* assign engines to PBDMAs */
|
||||
if (fifo->spoon_nr >= 3) {
|
||||
nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
|
||||
nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
|
||||
nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
|
||||
nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
|
||||
nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
|
||||
nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
|
||||
}
|
||||
|
||||
/* PBDMA[n] */
|
||||
for (i = 0; i < fifo->spoon_nr; i++) {
|
||||
nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
|
||||
nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
|
||||
nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
|
||||
}
|
||||
|
||||
nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
|
||||
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
|
||||
|
||||
nvkm_wr32(device, 0x002100, 0xffffffff);
|
||||
nvkm_wr32(device, 0x002140, 0x7fffffff);
|
||||
nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass *
|
||||
gf100_fifo_oclass = &(struct nvkm_oclass) {
|
||||
|
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h (new file, 26 lines)
@@ -0,0 +1,26 @@
#ifndef __GF100_FIFO_H__
#define __GF100_FIFO_H__
#include "priv.h"

struct gf100_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct {
		struct nvkm_memory *mem[2];
		int active;
		wait_queue_head_t wait;
	} runlist;

	struct {
		struct nvkm_memory *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

void gf100_fifo_intr_engine(struct gf100_fifo *);
void gf100_fifo_runlist_update(struct gf100_fifo *);
#endif
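
The gf100_fifo structure above keeps two runlist buffers and an "active" index; the runlist-update path (largely elided by the hunks around here) alternates between them so a new list can be assembled while the hardware may still be consuming the previous one. A hedged, generic sketch of that double-buffering pattern, with all names invented and no claim that this matches the driver's actual update routine:

/* Generic double-buffer flip illustrating the runlist.mem[2]/active idea. */
#include <stddef.h>
#include <stdint.h>

struct demo_runlist {
	uint32_t *mem[2];	/* two CPU-visible buffers */
	size_t    len[2];	/* entries currently in each buffer */
	int       active;	/* buffer most recently handed to hardware */
};

/* Build the next list in the inactive buffer, then flip. */
static const uint32_t *
demo_runlist_commit(struct demo_runlist *rl,
		    const uint32_t *chids, size_t nr)
{
	int next = rl->active ^ 1;
	size_t i;

	for (i = 0; i < nr; i++)
		rl->mem[next][i] = chids[i];
	rl->len[next] = nr;
	rl->active = next;
	return rl->mem[next];	/* what would be submitted to the hardware */
}
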
@@ -22,20 +22,15 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "gk104.h"
|
||||
#include "changk104.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/engctx.h>
|
||||
#include <core/enum.h>
|
||||
#include <core/handle.h>
|
||||
#include <subdev/bar.h>
|
||||
#include <subdev/fb.h>
|
||||
#include <subdev/mmu.h>
|
||||
#include <subdev/timer.h>
|
||||
#include <engine/sw.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/ioctl.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
|
||||
static const struct {
|
||||
@@ -54,47 +49,30 @@ static const struct {
|
||||
#undef _
|
||||
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
|
||||
|
||||
struct gk104_fifo_engn {
|
||||
struct nvkm_memory *runlist[2];
|
||||
int cur_runlist;
|
||||
wait_queue_head_t wait;
|
||||
};
|
||||
|
||||
struct gk104_fifo {
|
||||
struct nvkm_fifo base;
|
||||
|
||||
struct work_struct fault;
|
||||
u64 mask;
|
||||
|
||||
struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
|
||||
struct {
|
||||
struct nvkm_memory *mem;
|
||||
struct nvkm_vma bar;
|
||||
} user;
|
||||
int spoon_nr;
|
||||
};
|
||||
|
||||
struct gk104_fifo_base {
|
||||
struct nvkm_fifo_base base;
|
||||
struct nvkm_gpuobj *pgd;
|
||||
struct nvkm_vm *vm;
|
||||
};
|
||||
|
||||
struct gk104_fifo_chan {
|
||||
struct nvkm_fifo_chan base;
|
||||
u32 engine;
|
||||
enum {
|
||||
STOPPED,
|
||||
RUNNING,
|
||||
KILLED
|
||||
} state;
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO channel objects
|
||||
******************************************************************************/
|
||||
static void
|
||||
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
|
||||
}
|
||||
|
||||
static const struct nvkm_event_func
|
||||
gk104_fifo_uevent_func = {
|
||||
.ctor = nvkm_fifo_uevent_ctor,
|
||||
.init = gk104_fifo_uevent_init,
|
||||
.fini = gk104_fifo_uevent_fini,
|
||||
};
|
||||
|
||||
void
|
||||
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
|
||||
{
|
||||
struct gk104_fifo_engn *engn = &fifo->engine[engine];
|
||||
@@ -128,322 +106,6 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
|
||||
mutex_unlock(&nv_subdev(fifo)->mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_context_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct gk104_fifo_base *base = (void *)parent->parent;
|
||||
struct nvkm_gpuobj *engn = &base->base.gpuobj;
|
||||
struct nvkm_engctx *ectx = (void *)object;
|
||||
u32 addr;
|
||||
int ret;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW :
|
||||
return 0;
|
||||
case NVDEV_ENGINE_CE0:
|
||||
case NVDEV_ENGINE_CE1:
|
||||
case NVDEV_ENGINE_CE2:
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0210; break;
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
|
||||
case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!ectx->vma.node) {
|
||||
ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
|
||||
NV_MEM_ACCESS_RW, &ectx->vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
}
|
||||
|
||||
nvkm_kmap(engn);
|
||||
nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
|
||||
nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
|
||||
nvkm_done(engn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
|
||||
{
|
||||
struct nvkm_object *obj = (void *)chan;
|
||||
struct gk104_fifo *fifo = (void *)obj->engine;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
|
||||
nvkm_wr32(device, 0x002634, chan->base.chid);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
|
||||
chan->base.chid, nvkm_client_name(chan));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct gk104_fifo_base *base = (void *)parent->parent;
|
||||
struct gk104_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_gpuobj *engn = &base->base.gpuobj;
|
||||
u32 addr;
|
||||
int ret;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_CE0 :
|
||||
case NVDEV_ENGINE_CE1 :
|
||||
case NVDEV_ENGINE_CE2 : addr = 0x0000; break;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0210; break;
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
|
||||
case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = gk104_fifo_chan_kick(chan);
|
||||
if (ret && suspend)
|
||||
return ret;
|
||||
|
||||
if (addr) {
|
||||
nvkm_kmap(engn);
|
||||
nvkm_wo32(engn, addr + 0x00, 0x00000000);
|
||||
nvkm_wo32(engn, addr + 0x04, 0x00000000);
|
||||
nvkm_done(engn);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct kepler_channel_gpfifo_a_v0 v0;
|
||||
} *args = data;
|
||||
struct gk104_fifo *fifo = (void *)engine;
|
||||
struct gk104_fifo_base *base = (void *)parent;
|
||||
struct gk104_fifo_chan *chan;
|
||||
struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
|
||||
u64 usermem, ioffset, ilength;
|
||||
u32 engines;
|
||||
int ret, i;
|
||||
|
||||
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel gpfifo vers %d "
|
||||
"ioffset %016llx ilength %08x engine %08x\n",
|
||||
args->v0.version, args->v0.ioffset,
|
||||
args->v0.ilength, args->v0.engine);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) {
|
||||
if (!nvkm_engine(parent, fifo_engine[i].subdev))
|
||||
continue;
|
||||
engines |= (1 << i);
|
||||
}
|
||||
|
||||
if (!args->v0.engine) {
|
||||
static struct nvkm_oclass oclass = {
|
||||
.ofuncs = &nvkm_object_ofuncs,
|
||||
};
|
||||
args->v0.engine = engines;
|
||||
return nvkm_object_old(parent, engine, &oclass, NULL, 0, pobject);
|
||||
}
|
||||
|
||||
engines &= args->v0.engine;
|
||||
if (!engines) {
|
||||
nvif_ioctl(parent, "unsupported engines %08x\n",
|
||||
args->v0.engine);
|
||||
return -ENODEV;
|
||||
}
|
||||
i = __ffs(engines);
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
|
||||
fifo->user.bar.offset, 0x200, 0,
|
||||
fifo_engine[i].mask, &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = gk104_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = gk104_fifo_context_detach;
|
||||
chan->engine = i;
|
||||
|
||||
usermem = chan->base.chid * 0x200;
|
||||
ioffset = args->v0.ioffset;
|
||||
ilength = order_base_2(args->v0.ilength / 8);
|
||||
|
||||
nvkm_kmap(fifo->user.mem);
|
||||
for (i = 0; i < 0x200; i += 4)
|
||||
nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
|
||||
nvkm_done(fifo->user.mem);
|
||||
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
|
||||
|
||||
nvkm_kmap(ramfc);
|
||||
nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
|
||||
nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
|
||||
nvkm_wo32(ramfc, 0x10, 0x0000face);
|
||||
nvkm_wo32(ramfc, 0x30, 0xfffff902);
|
||||
nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
|
||||
nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
|
||||
nvkm_wo32(ramfc, 0x84, 0x20400000);
|
||||
nvkm_wo32(ramfc, 0x94, 0x30000001);
|
||||
nvkm_wo32(ramfc, 0x9c, 0x00000100);
|
||||
nvkm_wo32(ramfc, 0xac, 0x0000001f);
|
||||
nvkm_wo32(ramfc, 0xe8, chan->base.chid);
|
||||
nvkm_wo32(ramfc, 0xb8, 0xf8000000);
|
||||
nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
|
||||
nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
|
||||
nvkm_done(ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_chan_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
|
||||
struct gk104_fifo *fifo = (void *)object->engine;
|
||||
struct gk104_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_channel_init(&chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
|
||||
nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
|
||||
|
||||
if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
|
||||
nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
|
||||
gk104_fifo_runlist_update(fifo, chan->engine);
|
||||
nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
|
||||
{
|
||||
struct gk104_fifo *fifo = (void *)object->engine;
|
||||
struct gk104_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
|
||||
if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
|
||||
nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
|
||||
gk104_fifo_runlist_update(fifo, chan->engine);
|
||||
}
|
||||
|
||||
nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
|
||||
return nvkm_fifo_channel_fini(&chan->base, suspend);
|
||||
}
|
||||
|
||||
struct nvkm_ofuncs
|
||||
gk104_fifo_chan_ofuncs = {
|
||||
.ctor = gk104_fifo_chan_ctor,
|
||||
.dtor = _nvkm_fifo_channel_dtor,
|
||||
.init = gk104_fifo_chan_init,
|
||||
.fini = gk104_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
static struct nvkm_oclass
|
||||
gk104_fifo_sclass[] = {
|
||||
{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO context - instmem heap and vm setup
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = nv_engine(engine)->subdev.device;
|
||||
struct gk104_fifo_base *base;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
|
||||
0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
|
||||
*pobject = nv_object(base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_kmap(&base->base.gpuobj);
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
|
||||
nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
|
||||
nvkm_done(&base->base.gpuobj);
|
||||
|
||||
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_fifo_context_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct gk104_fifo_base *base = (void *)object;
|
||||
nvkm_vm_ref(NULL, &base->vm, base->pgd);
|
||||
nvkm_gpuobj_del(&base->pgd);
|
||||
nvkm_fifo_context_destroy(&base->base);
|
||||
}
|
||||
|
||||
static struct nvkm_oclass
|
||||
gk104_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0xe0),
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = gk104_fifo_context_ctor,
|
||||
.dtor = gk104_fifo_context_dtor,
|
||||
.init = _nvkm_fifo_context_init,
|
||||
.fini = _nvkm_fifo_context_fini,
|
||||
.rd32 = _nvkm_fifo_context_rd32,
|
||||
.wr32 = _nvkm_fifo_context_wr32,
|
||||
},
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* PFIFO engine
|
||||
******************************************************************************/
|
||||
|
||||
static inline int
|
||||
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
|
||||
{
|
||||
@ -998,29 +660,6 @@ gk104_fifo_intr(struct nvkm_subdev *subdev)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
|
||||
{
|
||||
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
|
||||
struct nvkm_device *device = fifo->engine.subdev.device;
|
||||
nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
|
||||
}
|
||||
|
||||
static const struct nvkm_event_func
|
||||
gk104_fifo_uevent_func = {
|
||||
.ctor = nvkm_fifo_uevent_ctor,
|
||||
.init = gk104_fifo_uevent_init,
|
||||
.fini = gk104_fifo_uevent_fini,
|
||||
};
|
||||
|
||||
int
|
||||
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
|
||||
{
|
||||
|
@ -1,6 +1,31 @@
|
||||
#ifndef __NVKM_FIFO_NVE0_H__
|
||||
#define __NVKM_FIFO_NVE0_H__
|
||||
#include <engine/fifo.h>
|
||||
#ifndef __GK104_FIFO_H__
|
||||
#define __GK104_FIFO_H__
|
||||
#include "priv.h"
|
||||
|
||||
struct gk104_fifo_engn {
|
||||
struct nvkm_memory *runlist[2];
|
||||
int cur_runlist;
|
||||
wait_queue_head_t wait;
|
||||
};
|
||||
|
||||
struct gk104_fifo {
|
||||
struct nvkm_fifo base;
|
||||
|
||||
struct work_struct fault;
|
||||
u64 mask;
|
||||
|
||||
struct gk104_fifo_engn engine[7];
|
||||
struct {
|
||||
struct nvkm_memory *mem;
|
||||
struct nvkm_vma bar;
|
||||
} user;
|
||||
int spoon_nr;
|
||||
};
|
||||
|
||||
struct gk104_fifo_impl {
|
||||
struct nvkm_oclass base;
|
||||
u32 channels;
|
||||
};
|
||||
|
||||
int gk104_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
|
||||
struct nvkm_oclass *, void *, u32,
|
||||
@ -8,13 +33,7 @@ int gk104_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
|
||||
void gk104_fifo_dtor(struct nvkm_object *);
|
||||
int gk104_fifo_init(struct nvkm_object *);
|
||||
int gk104_fifo_fini(struct nvkm_object *, bool);
|
||||
|
||||
struct gk104_fifo_impl {
|
||||
struct nvkm_oclass base;
|
||||
u32 channels;
|
||||
};
|
||||
|
||||
extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs;
|
||||
void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine);
|
||||
|
||||
int gm204_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
|
||||
struct nvkm_oclass *, void *, u32,
|
||||
|
@ -22,14 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "gk104.h"
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
static struct nvkm_oclass
|
||||
gm204_fifo_sclass[] = {
|
||||
{ MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
|
||||
{}
|
||||
};
|
||||
#include "changk104.h"
|
||||
|
||||
int
|
||||
gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
|
122
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
Normal file
122
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
Normal file
@ -0,0 +1,122 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "channv50.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/ramht.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static int
|
||||
g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv50_channel_gpfifo_v0 v0;
|
||||
} *args = data;
|
||||
struct nvkm_device *device = parent->engine->subdev.device;
|
||||
struct nv50_fifo_base *base = (void *)parent;
|
||||
struct nv50_fifo_chan *chan;
|
||||
u64 ioffset, ilength;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
|
||||
"pushbuf %llx ioffset %016llx "
|
||||
"ilength %08x\n",
|
||||
args->v0.version, args->v0.vm, args->v0.pushbuf,
|
||||
args->v0.ioffset, args->v0.ilength);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x2000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG) |
|
||||
(1ULL << NVDEV_ENGINE_ME) |
|
||||
(1ULL << NVDEV_ENGINE_VP) |
|
||||
(1ULL << NVDEV_ENGINE_CIPHER) |
|
||||
(1ULL << NVDEV_ENGINE_SEC) |
|
||||
(1ULL << NVDEV_ENGINE_BSP) |
|
||||
(1ULL << NVDEV_ENGINE_MSVLD) |
|
||||
(1ULL << NVDEV_ENGINE_MSPDEC) |
|
||||
(1ULL << NVDEV_ENGINE_MSPPP) |
|
||||
(1ULL << NVDEV_ENGINE_CE0) |
|
||||
(1ULL << NVDEV_ENGINE_VIC), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
|
||||
&chan->ramht);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_parent(chan)->context_attach = g84_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = g84_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = g84_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
|
||||
|
||||
ioffset = args->v0.ioffset;
|
||||
ilength = order_base_2(args->v0.ilength / 8);
|
||||
|
||||
nvkm_kmap(base->ramfc);
|
||||
nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
|
||||
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
|
||||
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
|
||||
nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
|
||||
nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
|
||||
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
|
||||
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
|
||||
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
|
||||
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
|
||||
(4 << 24) /* SEARCH_FULL */ |
|
||||
(chan->ramht->gpuobj->node->offset >> 4));
|
||||
nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
|
||||
nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
|
||||
nvkm_done(base->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_ofuncs
|
||||
g84_fifo_ofuncs_ind = {
|
||||
.ctor = g84_fifo_chan_ctor_ind,
|
||||
.dtor = nv50_fifo_chan_dtor,
|
||||
.init = g84_fifo_chan_init,
|
||||
.fini = nv50_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
304
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
Normal file
304
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
Normal file
@ -0,0 +1,304 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "changf100.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <subdev/fb.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static int
|
||||
gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct gf100_fifo *fifo = (void *)parent->engine;
|
||||
struct gf100_fifo_base *base = (void *)parent->parent;
|
||||
struct gf100_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_gpuobj *engn = &base->base.gpuobj;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 addr;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0210; break;
|
||||
case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
|
||||
case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
|
||||
case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nvkm_wr32(device, 0x002634, chan->base.chid);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, 0x002634) == chan->base.chid)
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
|
||||
chan->base.chid, nvkm_client_name(chan));
|
||||
if (suspend)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
nvkm_kmap(engn);
|
||||
nvkm_wo32(engn, addr + 0x00, 0x00000000);
|
||||
nvkm_wo32(engn, addr + 0x04, 0x00000000);
|
||||
nvkm_done(engn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_context_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct gf100_fifo_base *base = (void *)parent->parent;
|
||||
struct nvkm_gpuobj *engn = &base->base.gpuobj;
|
||||
struct nvkm_engctx *ectx = (void *)object;
|
||||
u32 addr;
|
||||
int ret;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0210; break;
|
||||
case NVDEV_ENGINE_CE0 : addr = 0x0230; break;
|
||||
case NVDEV_ENGINE_CE1 : addr = 0x0240; break;
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
|
||||
case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!ectx->vma.node) {
|
||||
ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
|
||||
NV_MEM_ACCESS_RW, &ectx->vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
}
|
||||
|
||||
nvkm_kmap(engn);
|
||||
nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
|
||||
nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
|
||||
nvkm_done(engn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
|
||||
{
|
||||
struct gf100_fifo *fifo = (void *)object->engine;
|
||||
struct gf100_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
|
||||
if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
|
||||
nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
|
||||
gf100_fifo_runlist_update(fifo);
|
||||
}
|
||||
|
||||
gf100_fifo_intr_engine(fifo);
|
||||
|
||||
nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
|
||||
return nvkm_fifo_channel_fini(&chan->base, suspend);
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_chan_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
|
||||
struct gf100_fifo *fifo = (void *)object->engine;
|
||||
struct gf100_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_channel_init(&chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
|
||||
|
||||
if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
|
||||
nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
|
||||
gf100_fifo_runlist_update(fifo);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct fermi_channel_gpfifo_v0 v0;
|
||||
} *args = data;
|
||||
struct gf100_fifo *fifo = (void *)engine;
|
||||
struct gf100_fifo_base *base = (void *)parent;
|
||||
struct gf100_fifo_chan *chan;
|
||||
struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
|
||||
u64 usermem, ioffset, ilength;
|
||||
int ret, i;
|
||||
|
||||
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx"
|
||||
"ioffset %016llx ilength %08x\n",
|
||||
args->v0.version, args->v0.vm, args->v0.ioffset,
|
||||
args->v0.ilength);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
|
||||
fifo->user.bar.offset, 0x1000, 0,
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_CE0) |
|
||||
(1ULL << NVDEV_ENGINE_CE1) |
|
||||
(1ULL << NVDEV_ENGINE_MSVLD) |
|
||||
(1ULL << NVDEV_ENGINE_MSPDEC) |
|
||||
(1ULL << NVDEV_ENGINE_MSPPP), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = gf100_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = gf100_fifo_context_detach;
|
||||
|
||||
usermem = chan->base.chid * 0x1000;
|
||||
ioffset = args->v0.ioffset;
|
||||
ilength = order_base_2(args->v0.ilength / 8);
|
||||
|
||||
nvkm_kmap(fifo->user.mem);
|
||||
for (i = 0; i < 0x1000; i += 4)
|
||||
nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
|
||||
nvkm_done(fifo->user.mem);
|
||||
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
|
||||
|
||||
nvkm_kmap(ramfc);
|
||||
nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
|
||||
nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
|
||||
nvkm_wo32(ramfc, 0x10, 0x0000face);
|
||||
nvkm_wo32(ramfc, 0x30, 0xfffff902);
|
||||
nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
|
||||
nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
|
||||
nvkm_wo32(ramfc, 0x54, 0x00000002);
|
||||
nvkm_wo32(ramfc, 0x84, 0x20400000);
|
||||
nvkm_wo32(ramfc, 0x94, 0x30000001);
|
||||
nvkm_wo32(ramfc, 0x9c, 0x00000100);
|
||||
nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
|
||||
nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
|
||||
nvkm_wo32(ramfc, 0xac, 0x0000001f);
|
||||
nvkm_wo32(ramfc, 0xb8, 0xf8000000);
|
||||
nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
|
||||
nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
|
||||
nvkm_done(ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
gf100_fifo_ofuncs = {
|
||||
.ctor = gf100_fifo_chan_ctor,
|
||||
.dtor = _nvkm_fifo_channel_dtor,
|
||||
.init = gf100_fifo_chan_init,
|
||||
.fini = gf100_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
struct nvkm_oclass
|
||||
gf100_fifo_sclass[] = {
|
||||
{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
|
||||
{}
|
||||
};
|
||||
|
||||
static int
|
||||
gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = nv_engine(engine)->subdev.device;
|
||||
struct gf100_fifo_base *base;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
|
||||
0x1000, NVOBJ_FLAG_ZERO_ALLOC |
|
||||
NVOBJ_FLAG_HEAP, &base);
|
||||
*pobject = nv_object(base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_kmap(&base->base.gpuobj);
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
|
||||
nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
|
||||
nvkm_done(&base->base.gpuobj);
|
||||
|
||||
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gf100_fifo_context_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct gf100_fifo_base *base = (void *)object;
|
||||
nvkm_vm_ref(NULL, &base->vm, base->pgd);
|
||||
nvkm_gpuobj_del(&base->pgd);
|
||||
nvkm_fifo_context_destroy(&base->base);
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
gf100_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0xc0),
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = gf100_fifo_context_ctor,
|
||||
.dtor = gf100_fifo_context_dtor,
|
||||
.init = _nvkm_fifo_context_init,
|
||||
.fini = _nvkm_fifo_context_fini,
|
||||
.rd32 = _nvkm_fifo_context_rd32,
|
||||
.wr32 = _nvkm_fifo_context_wr32,
|
||||
},
|
||||
};
|
357
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
Normal file
357
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
Normal file
@ -0,0 +1,357 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "changk104.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <subdev/fb.h>
|
||||
#include <subdev/mmu.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
|
||||
static const struct {
|
||||
u64 subdev;
|
||||
u64 mask;
|
||||
} fifo_engine[] = {
|
||||
_(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_CE2)),
|
||||
_(NVDEV_ENGINE_MSPDEC , 0),
|
||||
_(NVDEV_ENGINE_MSPPP , 0),
|
||||
_(NVDEV_ENGINE_MSVLD , 0),
|
||||
_(NVDEV_ENGINE_CE0 , 0),
|
||||
_(NVDEV_ENGINE_CE1 , 0),
|
||||
_(NVDEV_ENGINE_MSENC , 0),
|
||||
};
|
||||
#undef _
|
||||
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
|
||||
|
||||
static int
|
||||
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
|
||||
{
|
||||
struct nvkm_object *obj = (void *)chan;
|
||||
struct gk104_fifo *fifo = (void *)obj->engine;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
|
||||
nvkm_wr32(device, 0x002634, chan->base.chid);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
|
||||
chan->base.chid, nvkm_client_name(chan));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct gk104_fifo_base *base = (void *)parent->parent;
|
||||
struct gk104_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_gpuobj *engn = &base->base.gpuobj;
|
||||
u32 addr;
|
||||
int ret;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_CE0 :
|
||||
case NVDEV_ENGINE_CE1 :
|
||||
case NVDEV_ENGINE_CE2 : addr = 0x0000; break;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0210; break;
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
|
||||
case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = gk104_fifo_chan_kick(chan);
|
||||
if (ret && suspend)
|
||||
return ret;
|
||||
|
||||
if (addr) {
|
||||
nvkm_kmap(engn);
|
||||
nvkm_wo32(engn, addr + 0x00, 0x00000000);
|
||||
nvkm_wo32(engn, addr + 0x04, 0x00000000);
|
||||
nvkm_done(engn);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_context_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct gk104_fifo_base *base = (void *)parent->parent;
|
||||
struct nvkm_gpuobj *engn = &base->base.gpuobj;
|
||||
struct nvkm_engctx *ectx = (void *)object;
|
||||
u32 addr;
|
||||
int ret;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW :
|
||||
return 0;
|
||||
case NVDEV_ENGINE_CE0:
|
||||
case NVDEV_ENGINE_CE1:
|
||||
case NVDEV_ENGINE_CE2:
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0210; break;
|
||||
case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
|
||||
case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
|
||||
case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!ectx->vma.node) {
|
||||
ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
|
||||
NV_MEM_ACCESS_RW, &ectx->vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
}
|
||||
|
||||
nvkm_kmap(engn);
|
||||
nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
|
||||
nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
|
||||
nvkm_done(engn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
|
||||
{
|
||||
struct gk104_fifo *fifo = (void *)object->engine;
|
||||
struct gk104_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
|
||||
if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
|
||||
nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
|
||||
gk104_fifo_runlist_update(fifo, chan->engine);
|
||||
}
|
||||
|
||||
nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
|
||||
return nvkm_fifo_channel_fini(&chan->base, suspend);
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_chan_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
|
||||
struct gk104_fifo *fifo = (void *)object->engine;
|
||||
struct gk104_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_channel_init(&chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
|
||||
nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
|
||||
|
||||
if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
|
||||
nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
|
||||
gk104_fifo_runlist_update(fifo, chan->engine);
|
||||
nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct kepler_channel_gpfifo_a_v0 v0;
|
||||
} *args = data;
|
||||
struct gk104_fifo *fifo = (void *)engine;
|
||||
struct gk104_fifo_base *base = (void *)parent;
|
||||
struct gk104_fifo_chan *chan;
|
||||
struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
|
||||
u64 usermem, ioffset, ilength;
|
||||
u32 engines;
|
||||
int ret, i;
|
||||
|
||||
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx"
|
||||
"ioffset %016llx ilength %08x engine %08x\n",
|
||||
args->v0.version, args->v0.vm, args->v0.ioffset,
|
||||
args->v0.ilength, args->v0.engine);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) {
|
||||
if (!nvkm_engine(parent, fifo_engine[i].subdev))
|
||||
continue;
|
||||
engines |= (1 << i);
|
||||
}
|
||||
|
||||
if (!args->v0.engine) {
|
||||
static struct nvkm_oclass oclass = {
|
||||
.ofuncs = &nvkm_object_ofuncs,
|
||||
};
|
||||
args->v0.engine = engines;
|
||||
return nvkm_object_old(parent, engine, &oclass, NULL, 0, pobject);
|
||||
}
|
||||
|
||||
engines &= args->v0.engine;
|
||||
if (!engines) {
|
||||
nvif_ioctl(parent, "unsupported engines %08x\n",
|
||||
args->v0.engine);
|
||||
return -ENODEV;
|
||||
}
|
||||
i = __ffs(engines);
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
|
||||
fifo->user.bar.offset, 0x200, 0,
|
||||
fifo_engine[i].mask, &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = gk104_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = gk104_fifo_context_detach;
|
||||
chan->engine = i;
|
||||
|
||||
usermem = chan->base.chid * 0x200;
|
||||
ioffset = args->v0.ioffset;
|
||||
ilength = order_base_2(args->v0.ilength / 8);
|
||||
|
||||
nvkm_kmap(fifo->user.mem);
|
||||
for (i = 0; i < 0x200; i += 4)
|
||||
nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
|
||||
nvkm_done(fifo->user.mem);
|
||||
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
|
||||
|
||||
nvkm_kmap(ramfc);
|
||||
nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
|
||||
nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
|
||||
nvkm_wo32(ramfc, 0x10, 0x0000face);
|
||||
nvkm_wo32(ramfc, 0x30, 0xfffff902);
|
||||
nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
|
||||
nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
|
||||
nvkm_wo32(ramfc, 0x84, 0x20400000);
|
||||
nvkm_wo32(ramfc, 0x94, 0x30000001);
|
||||
nvkm_wo32(ramfc, 0x9c, 0x00000100);
|
||||
nvkm_wo32(ramfc, 0xac, 0x0000001f);
|
||||
nvkm_wo32(ramfc, 0xe8, chan->base.chid);
|
||||
nvkm_wo32(ramfc, 0xb8, 0xf8000000);
|
||||
nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
|
||||
nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
|
||||
nvkm_done(ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_ofuncs
|
||||
gk104_fifo_chan_ofuncs = {
|
||||
.ctor = gk104_fifo_chan_ctor,
|
||||
.dtor = _nvkm_fifo_channel_dtor,
|
||||
.init = gk104_fifo_chan_init,
|
||||
.fini = gk104_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
struct nvkm_oclass
|
||||
gk104_fifo_sclass[] = {
|
||||
{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
|
||||
{}
|
||||
};
|
||||
|
||||
static int
|
||||
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = nv_engine(engine)->subdev.device;
|
||||
struct gk104_fifo_base *base;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
|
||||
0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
|
||||
*pobject = nv_object(base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_kmap(&base->base.gpuobj);
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
|
||||
nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
|
||||
nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
|
||||
nvkm_done(&base->base.gpuobj);
|
||||
|
||||
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_fifo_context_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct gk104_fifo_base *base = (void *)object;
|
||||
nvkm_vm_ref(NULL, &base->vm, base->pgd);
|
||||
nvkm_gpuobj_del(&base->pgd);
|
||||
nvkm_fifo_context_destroy(&base->base);
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
gk104_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0xe0),
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = gk104_fifo_context_ctor,
|
||||
.dtor = gk104_fifo_context_dtor,
|
||||
.init = _nvkm_fifo_context_init,
|
||||
.fini = _nvkm_fifo_context_fini,
|
||||
.rd32 = _nvkm_fifo_context_rd32,
|
||||
.wr32 = _nvkm_fifo_context_wr32,
|
||||
},
|
||||
};
|
32
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
Normal file
32
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
Normal file
@ -0,0 +1,32 @@
|
||||
/*
|
||||
* Copyright 2015 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "changk104.h"
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
struct nvkm_oclass
|
||||
gm204_fifo_sclass[] = {
|
||||
{ MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
|
||||
{}
|
||||
};
|
110
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
Normal file
110
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
Normal file
@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Copyright 2012 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "channv50.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/ramht.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static int
|
||||
nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv50_channel_gpfifo_v0 v0;
|
||||
} *args = data;
|
||||
struct nvkm_device *device = parent->engine->subdev.device;
|
||||
struct nv50_fifo_base *base = (void *)parent;
|
||||
struct nv50_fifo_chan *chan;
|
||||
u64 ioffset, ilength;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
|
||||
"pushbuf %llx ioffset %016llx "
|
||||
"ilength %08x\n",
|
||||
args->v0.version, args->v0.vm, args->v0.pushbuf,
|
||||
args->v0.ioffset, args->v0.ilength);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x2000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
|
||||
|
||||
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
|
||||
&chan->ramht);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ioffset = args->v0.ioffset;
|
||||
ilength = order_base_2(args->v0.ilength / 8);
|
||||
|
||||
nvkm_kmap(base->ramfc);
|
||||
nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
|
||||
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
|
||||
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
|
||||
nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
|
||||
nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
|
||||
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
|
||||
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
|
||||
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
|
||||
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
|
||||
(4 << 24) /* SEARCH_FULL */ |
|
||||
(chan->ramht->gpuobj->node->offset >> 4));
|
||||
nvkm_done(base->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_ofuncs
|
||||
nv50_fifo_ofuncs_ind = {
|
||||
.ctor = nv50_fifo_chan_ctor_ind,
|
||||
.dtor = nv50_fifo_chan_dtor,
|
||||
.init = nv50_fifo_chan_init,
|
||||
.fini = nv50_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
@ -22,18 +22,15 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "channv04.h"
|
||||
#include "regsnv04.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/engctx.h>
|
||||
#include <core/handle.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/instmem.h>
|
||||
#include <subdev/timer.h>
|
||||
#include <engine/sw.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static struct ramfc_desc
|
||||
nv04_ramfc[] = {
|
||||
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
|
||||
@ -47,268 +44,6 @@ nv04_ramfc[] = {
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO channel objects
|
||||
******************************************************************************/
|
||||
|
||||
int
|
||||
nv04_fifo_object_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
u32 context, chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
if (nv_iclass(object, NV_GPUOBJ_CLASS))
|
||||
context = nv_gpuobj(object)->addr >> 4;
|
||||
else
|
||||
context = 0x00000004; /* just non-zero */
|
||||
|
||||
if (object->engine) {
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_DMAOBJ:
|
||||
case NVDEV_ENGINE_SW:
|
||||
context |= 0x00000000;
|
||||
break;
|
||||
case NVDEV_ENGINE_GR:
|
||||
context |= 0x00010000;
|
||||
break;
|
||||
case NVDEV_ENGINE_MPEG:
|
||||
context |= 0x00020000;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
context |= 0x80000000; /* valid */
|
||||
context |= chid << 24;
|
||||
|
||||
mutex_lock(&nv_subdev(fifo)->mutex);
|
||||
ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context);
|
||||
mutex_unlock(&nv_subdev(fifo)->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
mutex_lock(&nv_subdev(fifo)->mutex);
|
||||
nvkm_ramht_remove(imem->ramht, cookie);
|
||||
mutex_unlock(&nv_subdev(fifo)->mutex);
|
||||
}
|
||||
|
||||
int
|
||||
nv04_fifo_context_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv04_fifo_chan_ctor(struct nvkm_object *parent,
|
||||
struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv03_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nv04_fifo *fifo = (void *)engine;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
struct nv04_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
|
||||
"offset %08x\n", args->v0.version,
|
||||
args->v0.pushbuf, args->v0.offset);
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
|
||||
0x10000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
|
||||
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
|
||||
chan->ramfc = chan->base.chid * 32;
|
||||
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
|
||||
#ifdef __BIG_ENDIAN
|
||||
NV_PFIFO_CACHE1_BIG_ENDIAN |
|
||||
#endif
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
|
||||
nvkm_done(imem->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv04_fifo_chan_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)object->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)object;
|
||||
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	struct ramfc_desc *c = fifo->ramfc_desc;

	nvkm_kmap(imem->ramfc);
	do {
		nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);
	nvkm_done(imem->ramfc);

	nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *fctx = device->imem->ramfc;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		c = fifo->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		c = fifo->ramfc_desc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

int
nv04_fifo_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv04_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

void
nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
__acquires(fifo->base.lock)
@@ -552,36 +287,6 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}

static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
	nv_engine(fifo)->sclass = nv04_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv04_ramfc;
	return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	nvkm_fifo_destroy(&fifo->base);
}

int
nv04_fifo_init(struct nvkm_object *object)
{
@@ -617,6 +322,36 @@ nv04_fifo_init(struct nvkm_object *object)
	return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	nvkm_fifo_destroy(&fifo->base);
}

static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
	nv_engine(fifo)->sclass = nv04_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv04_ramfc;
	return 0;
}

struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),

@@ -1,135 +1,6 @@
#ifndef __NV04_FIFO_H__
#define __NV04_FIFO_H__
#include <engine/fifo.h>

#define NV04_PFIFO_DELAY_0                                 0x00002040
#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
#define NV03_PFIFO_INTR_0                                  0x00002100
#define NV03_PFIFO_INTR_EN_0                               0x00002140
#    define NV_PFIFO_INTR_CACHE_ERROR                      (1<<0)
#    define NV_PFIFO_INTR_RUNOUT                           (1<<4)
#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                  (1<<8)
#    define NV_PFIFO_INTR_DMA_PUSHER                       (1<<12)
#    define NV_PFIFO_INTR_DMA_PT                           (1<<16)
#    define NV_PFIFO_INTR_SEMAPHORE                        (1<<20)
#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                  (1<<24)
#define NV03_PFIFO_RAMHT                                   0x00002210
#define NV03_PFIFO_RAMFC                                   0x00002214
#define NV03_PFIFO_RAMRO                                   0x00002218
#define NV40_PFIFO_RAMFC                                   0x00002220
#define NV03_PFIFO_CACHES                                  0x00002500
#define NV04_PFIFO_MODE                                    0x00002504
#define NV04_PFIFO_DMA                                     0x00002508
#define NV04_PFIFO_SIZE                                    0x0000250c
#define NV50_PFIFO_CTX_TABLE(c)                            (0x2600+(c)*4)
#define NV50_PFIFO_CTX_TABLE__SIZE                         128
#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED               (1<<31)
#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                     (1<<30)
#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
#define NV03_PFIFO_CACHE1_PUSH1_DMA                        (1<<8)
#define NV40_PFIFO_CACHE1_PUSH1_DMA                        (1<<16)
#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
#define NV03_PFIFO_CACHE1_PUT                              0x00003210
#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
#define NV04_PFIFO_CACHE1_HASH                             0x00003258
#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
#define NV03_PFIFO_CACHE1_GET                              0x00003270
#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
#define NV40_PFIFO_UNK32E4                                 0x000032E4
#define NV04_PFIFO_CACHE1_METHOD(i)                        (0x00003800+(i*8))
#define NV04_PFIFO_CACHE1_DATA(i)                          (0x00003804+(i*8))
#define NV40_PFIFO_CACHE1_METHOD(i)                        (0x00090000+(i*8))
#define NV40_PFIFO_CACHE1_DATA(i)                          (0x00090004+(i*8))
#include "priv.h"

struct ramfc_desc {
	unsigned bits:6;
@@ -148,25 +19,10 @@ struct nv04_fifo_base {
	struct nvkm_fifo_base base;
};

struct nv04_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 subc[8];
	u32 ramfc;
};

int  nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
void nv04_fifo_object_detach(struct nvkm_object *, int);

void nv04_fifo_chan_dtor(struct nvkm_object *);
int  nv04_fifo_chan_init(struct nvkm_object *);
int  nv04_fifo_chan_fini(struct nvkm_object *, bool suspend);

int nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *,
			   struct nvkm_oclass *, void *, u32,
			   struct nvkm_object **);

void nv04_fifo_dtor(struct nvkm_object *);
int  nv04_fifo_init(struct nvkm_object *);
void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
#endif

@@ -22,13 +22,8 @@
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <subdev/instmem.h>

#include <nvif/class.h>
#include <nvif/unpack.h>
#include "channv04.h"
#include "regsnv04.h"

static struct ramfc_desc
nv10_ramfc[] = {
@@ -44,85 +39,6 @@ nv10_ramfc[] = {
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

static int
nv10_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	struct nv04_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_done(imem->ramfc);
	return 0;
}

static struct nvkm_ofuncs
nv10_fifo_ofuncs = {
	.ctor = nv10_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv10_fifo_sclass[] = {
	{ NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

static struct nvkm_oclass
nv10_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x10),
@@ -136,10 +52,6 @@ nv10_fifo_cclass = {
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

static int
nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,

@ -22,15 +22,12 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "channv04.h"
|
||||
#include "regsnv04.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/engctx.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/instmem.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static struct ramfc_desc
|
||||
nv17_ramfc[] = {
|
||||
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
|
||||
@ -50,87 +47,6 @@ nv17_ramfc[] = {
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO channel objects
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
nv17_fifo_chan_ctor(struct nvkm_object *parent,
|
||||
struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv03_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nv04_fifo *fifo = (void *)engine;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
struct nv04_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
|
||||
"offset %08x\n", args->v0.version,
|
||||
args->v0.pushbuf, args->v0.offset);
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
|
||||
0x10000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
|
||||
&chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
|
||||
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
|
||||
chan->ramfc = chan->base.chid * 64;
|
||||
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
|
||||
#ifdef __BIG_ENDIAN
|
||||
NV_PFIFO_CACHE1_BIG_ENDIAN |
|
||||
#endif
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
|
||||
nvkm_done(imem->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
nv17_fifo_ofuncs = {
|
||||
.ctor = nv17_fifo_chan_ctor,
|
||||
.dtor = nv04_fifo_chan_dtor,
|
||||
.init = nv04_fifo_chan_init,
|
||||
.fini = nv04_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
static struct nvkm_oclass
|
||||
nv17_fifo_sclass[] = {
|
||||
{ NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO context - basically just the instmem reserved for the channel
|
||||
******************************************************************************/
|
||||
|
||||
static struct nvkm_oclass
|
||||
nv17_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0x17),
|
||||
@ -144,33 +60,6 @@ nv17_fifo_cclass = {
|
||||
},
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* PFIFO engine
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nv04_fifo *fifo;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &fifo);
|
||||
*pobject = nv_object(fifo);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_subdev(fifo)->unit = 0x00000100;
|
||||
nv_subdev(fifo)->intr = nv04_fifo_intr;
|
||||
nv_engine(fifo)->cclass = &nv17_fifo_cclass;
|
||||
nv_engine(fifo)->sclass = nv17_fifo_sclass;
|
||||
fifo->base.pause = nv04_fifo_pause;
|
||||
fifo->base.start = nv04_fifo_start;
|
||||
fifo->ramfc_desc = nv17_ramfc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv17_fifo_init(struct nvkm_object *object)
|
||||
{
|
||||
@ -207,6 +96,29 @@ nv17_fifo_init(struct nvkm_object *object)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nv04_fifo *fifo;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &fifo);
|
||||
*pobject = nv_object(fifo);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_subdev(fifo)->unit = 0x00000100;
|
||||
nv_subdev(fifo)->intr = nv04_fifo_intr;
|
||||
nv_engine(fifo)->cclass = &nv17_fifo_cclass;
|
||||
nv_engine(fifo)->sclass = nv17_fifo_sclass;
|
||||
fifo->base.pause = nv04_fifo_pause;
|
||||
fifo->base.start = nv04_fifo_start;
|
||||
fifo->ramfc_desc = nv17_ramfc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass *
|
||||
nv17_fifo_oclass = &(struct nvkm_oclass) {
|
||||
.handle = NV_ENGINE(FIFO, 0x17),
|
||||
|
@ -22,16 +22,13 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "channv04.h"
|
||||
#include "regsnv04.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/engctx.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/fb.h>
|
||||
#include <subdev/instmem.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
static struct ramfc_desc
|
||||
nv40_ramfc[] = {
|
||||
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
|
||||
@ -59,207 +56,6 @@ nv40_ramfc[] = {
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO channel objects
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
nv40_fifo_object_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
u32 context, chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
if (nv_iclass(object, NV_GPUOBJ_CLASS))
|
||||
context = nv_gpuobj(object)->addr >> 4;
|
||||
else
|
||||
context = 0x00000004; /* just non-zero */
|
||||
|
||||
if (object->engine) {
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_DMAOBJ:
|
||||
case NVDEV_ENGINE_SW:
|
||||
context |= 0x00000000;
|
||||
break;
|
||||
case NVDEV_ENGINE_GR:
|
||||
context |= 0x00100000;
|
||||
break;
|
||||
case NVDEV_ENGINE_MPEG:
|
||||
context |= 0x00200000;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
context |= chid << 23;
|
||||
|
||||
mutex_lock(&nv_subdev(fifo)->mutex);
|
||||
ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context);
|
||||
mutex_unlock(&nv_subdev(fifo)->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
struct nvkm_instmem *imem = device->imem;
|
||||
unsigned long flags;
|
||||
u32 reg, ctx;
|
||||
|
||||
switch (nv_engidx(engctx->engine)) {
|
||||
case NVDEV_ENGINE_SW:
|
||||
return 0;
|
||||
case NVDEV_ENGINE_GR:
|
||||
reg = 0x32e0;
|
||||
ctx = 0x38;
|
||||
break;
|
||||
case NVDEV_ENGINE_MPEG:
|
||||
reg = 0x330c;
|
||||
ctx = 0x54;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&fifo->base.lock, flags);
|
||||
nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
|
||||
nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
|
||||
|
||||
if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
|
||||
nvkm_wr32(device, reg, nv_engctx(engctx)->addr);
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
|
||||
nvkm_done(imem->ramfc);
|
||||
|
||||
nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
|
||||
spin_unlock_irqrestore(&fifo->base.lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *engctx)
|
||||
{
|
||||
struct nv04_fifo *fifo = (void *)parent->engine;
|
||||
struct nv04_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
struct nvkm_instmem *imem = device->imem;
|
||||
unsigned long flags;
|
||||
u32 reg, ctx;
|
||||
|
||||
switch (nv_engidx(engctx->engine)) {
|
||||
case NVDEV_ENGINE_SW:
|
||||
return 0;
|
||||
case NVDEV_ENGINE_GR:
|
||||
reg = 0x32e0;
|
||||
ctx = 0x38;
|
||||
break;
|
||||
case NVDEV_ENGINE_MPEG:
|
||||
reg = 0x330c;
|
||||
ctx = 0x54;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&fifo->base.lock, flags);
|
||||
nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
|
||||
|
||||
if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
|
||||
nvkm_wr32(device, reg, 0x00000000);
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
|
||||
nvkm_done(imem->ramfc);
|
||||
|
||||
nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
|
||||
spin_unlock_irqrestore(&fifo->base.lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv03_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nv04_fifo *fifo = (void *)engine;
|
||||
struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
|
||||
struct nv04_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
|
||||
"offset %08x\n", args->v0.version,
|
||||
args->v0.pushbuf, args->v0.offset);
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x1000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = nv40_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = nv40_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = nv40_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
|
||||
chan->ramfc = chan->base.chid * 128;
|
||||
|
||||
nvkm_kmap(imem->ramfc);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
|
||||
#ifdef __BIG_ENDIAN
|
||||
NV_PFIFO_CACHE1_BIG_ENDIAN |
|
||||
#endif
|
||||
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
|
||||
nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
|
||||
nvkm_done(imem->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
nv40_fifo_ofuncs = {
|
||||
.ctor = nv40_fifo_chan_ctor,
|
||||
.dtor = nv04_fifo_chan_dtor,
|
||||
.init = nv04_fifo_chan_init,
|
||||
.fini = nv04_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
static struct nvkm_oclass
|
||||
nv40_fifo_sclass[] = {
|
||||
{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO context - basically just the instmem reserved for the channel
|
||||
******************************************************************************/
|
||||
|
||||
static struct nvkm_oclass
|
||||
nv40_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0x40),
|
||||
@ -273,33 +69,6 @@ nv40_fifo_cclass = {
|
||||
},
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* PFIFO engine
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nv04_fifo *fifo;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &fifo);
|
||||
*pobject = nv_object(fifo);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_subdev(fifo)->unit = 0x00000100;
|
||||
nv_subdev(fifo)->intr = nv04_fifo_intr;
|
||||
nv_engine(fifo)->cclass = &nv40_fifo_cclass;
|
||||
nv_engine(fifo)->sclass = nv40_fifo_sclass;
|
||||
fifo->base.pause = nv04_fifo_pause;
|
||||
fifo->base.start = nv04_fifo_start;
|
||||
fifo->ramfc_desc = nv40_ramfc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_fifo_init(struct nvkm_object *object)
|
||||
{
|
||||
@ -357,6 +126,29 @@ nv40_fifo_init(struct nvkm_object *object)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nv04_fifo *fifo;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &fifo);
|
||||
*pobject = nv_object(fifo);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_subdev(fifo)->unit = 0x00000100;
|
||||
nv_subdev(fifo)->intr = nv04_fifo_intr;
|
||||
nv_engine(fifo)->cclass = &nv40_fifo_cclass;
|
||||
nv_engine(fifo)->sclass = nv40_fifo_sclass;
|
||||
fifo->base.pause = nv04_fifo_pause;
|
||||
fifo->base.start = nv04_fifo_start;
|
||||
fifo->ramfc_desc = nv40_ramfc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass *
|
||||
nv40_fifo_oclass = &(struct nvkm_oclass) {
|
||||
.handle = NV_ENGINE(FIFO, 0x40),
|
||||
|
@ -22,30 +22,17 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv50.h"
|
||||
#include "nv04.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/engctx.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/mmu.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO channel objects
|
||||
******************************************************************************/
|
||||
#include "channv50.h"
|
||||
|
||||
static void
|
||||
nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
|
||||
nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
|
||||
{
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
struct nvkm_memory *cur;
|
||||
int i, p;
|
||||
|
||||
cur = fifo->playlist[fifo->cur_playlist];
|
||||
fifo->cur_playlist = !fifo->cur_playlist;
|
||||
cur = fifo->runlist[fifo->cur_runlist];
|
||||
fifo->cur_runlist = !fifo->cur_runlist;
|
||||
|
||||
nvkm_kmap(cur);
|
||||
for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) {
|
||||
@ -60,459 +47,13 @@ nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
|
||||
}
|
||||
|
||||
void
|
||||
nv50_fifo_playlist_update(struct nv50_fifo *fifo)
|
||||
nv50_fifo_runlist_update(struct nv50_fifo *fifo)
|
||||
{
|
||||
mutex_lock(&nv_subdev(fifo)->mutex);
|
||||
nv50_fifo_playlist_update_locked(fifo);
|
||||
nv50_fifo_runlist_update_locked(fifo);
|
||||
mutex_unlock(&nv_subdev(fifo)->mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo_base *base = (void *)parent->parent;
|
||||
struct nvkm_gpuobj *ectx = (void *)object;
|
||||
u64 limit = ectx->addr + ectx->size - 1;
|
||||
u64 start = ectx->addr;
|
||||
u32 addr;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0000; break;
|
||||
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
|
||||
|
||||
nvkm_kmap(base->eng);
|
||||
nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
|
||||
nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
|
||||
nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
|
||||
nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
|
||||
upper_32_bits(start));
|
||||
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
|
||||
nvkm_done(base->eng);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
|
||||
struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)parent->engine;
|
||||
struct nv50_fifo_base *base = (void *)parent->parent;
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 addr, me;
|
||||
int ret = 0;
|
||||
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_SW : return 0;
|
||||
case NVDEV_ENGINE_GR : addr = 0x0000; break;
|
||||
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* HW bug workaround:
|
||||
*
|
||||
* PFIFO will hang forever if the connected engines don't report
|
||||
* that they've processed the context switch request.
|
||||
*
|
||||
* In order for the kickoff to work, we need to ensure all the
|
||||
* connected engines are in a state where they can answer.
|
||||
*
|
||||
* Newer chipsets don't seem to suffer from this issue, and well,
|
||||
* there's also a "ignore these engines" bitmask reg we can use
|
||||
* if we hit the issue there..
|
||||
*/
|
||||
me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
|
||||
|
||||
/* do the kickoff... */
|
||||
nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "channel %d [%s] unload timeout\n",
|
||||
chan->base.chid, nvkm_client_name(chan));
|
||||
if (suspend)
|
||||
ret = -EBUSY;
|
||||
}
|
||||
nvkm_wr32(device, 0x00b860, me);
|
||||
|
||||
if (ret == 0) {
|
||||
nvkm_kmap(base->eng);
|
||||
nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
|
||||
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
|
||||
nvkm_done(base->eng);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_fifo_object_attach(struct nvkm_object *parent,
|
||||
struct nvkm_object *object, u32 handle)
|
||||
{
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
u32 context;
|
||||
|
||||
if (nv_iclass(object, NV_GPUOBJ_CLASS))
|
||||
context = nv_gpuobj(object)->node->offset >> 4;
|
||||
else
|
||||
context = 0x00000004; /* just non-zero */
|
||||
|
||||
if (object->engine) {
|
||||
switch (nv_engidx(object->engine)) {
|
||||
case NVDEV_ENGINE_DMAOBJ:
|
||||
case NVDEV_ENGINE_SW : context |= 0x00000000; break;
|
||||
case NVDEV_ENGINE_GR : context |= 0x00100000; break;
|
||||
case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return nvkm_ramht_insert(chan->ramht, NULL, 0, 0, handle, context);
|
||||
}
|
||||
|
||||
void
|
||||
nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
|
||||
{
|
||||
struct nv50_fifo_chan *chan = (void *)parent;
|
||||
nvkm_ramht_remove(chan->ramht, cookie);
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv50_channel_dma_v0 v0;
|
||||
} *args = data;
|
||||
struct nvkm_device *device = parent->engine->subdev.device;
|
||||
struct nv50_fifo_base *base = (void *)parent;
|
||||
struct nv50_fifo_chan *chan;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel dma size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
|
||||
"offset %016llx\n", args->v0.version,
|
||||
args->v0.pushbuf, args->v0.offset);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x2000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
|
||||
|
||||
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
|
||||
&chan->ramht);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_kmap(base->ramfc);
|
||||
nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
|
||||
nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
|
||||
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
|
||||
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
|
||||
nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
|
||||
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
|
||||
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
|
||||
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
|
||||
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
|
||||
(4 << 24) /* SEARCH_FULL */ |
|
||||
(chan->ramht->gpuobj->node->offset >> 4));
|
||||
nvkm_done(base->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
union {
|
||||
struct nv50_channel_gpfifo_v0 v0;
|
||||
} *args = data;
|
||||
struct nvkm_device *device = parent->engine->subdev.device;
|
||||
struct nv50_fifo_base *base = (void *)parent;
|
||||
struct nv50_fifo_chan *chan;
|
||||
u64 ioffset, ilength;
|
||||
int ret;
|
||||
|
||||
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
|
||||
if (nvif_unpack(args->v0, 0, 0, false)) {
|
||||
nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %llx "
|
||||
"ioffset %016llx ilength %08x\n",
|
||||
args->v0.version, args->v0.pushbuf, args->v0.ioffset,
|
||||
args->v0.ilength);
|
||||
if (args->v0.vm)
|
||||
return -ENOENT;
|
||||
} else
|
||||
return ret;
|
||||
|
||||
ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
|
||||
0x2000, args->v0.pushbuf,
|
||||
(1ULL << NVDEV_ENGINE_DMAOBJ) |
|
||||
(1ULL << NVDEV_ENGINE_SW) |
|
||||
(1ULL << NVDEV_ENGINE_GR) |
|
||||
(1ULL << NVDEV_ENGINE_MPEG), &chan);
|
||||
*pobject = nv_object(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
chan->base.inst = base->base.gpuobj.addr;
|
||||
args->v0.chid = chan->base.chid;
|
||||
|
||||
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
|
||||
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
|
||||
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
|
||||
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
|
||||
|
||||
ret = nvkm_ramht_new(device, 0x8000, 16, &base->base.gpuobj,
|
||||
&chan->ramht);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ioffset = args->v0.ioffset;
|
||||
ilength = order_base_2(args->v0.ilength / 8);
|
||||
|
||||
nvkm_kmap(base->ramfc);
|
||||
nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
|
||||
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
|
||||
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
|
||||
nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
|
||||
nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
|
||||
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
|
||||
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
|
||||
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
|
||||
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
|
||||
(4 << 24) /* SEARCH_FULL */ |
|
||||
(chan->ramht->gpuobj->node->offset >> 4));
|
||||
nvkm_done(base->ramfc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv50_fifo_chan_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo_chan *chan = (void *)object;
|
||||
nvkm_ramht_del(&chan->ramht);
|
||||
nvkm_fifo_channel_destroy(&chan->base);
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_fifo_chan_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)object->engine;
|
||||
struct nv50_fifo_base *base = (void *)object->parent;
|
||||
struct nv50_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_gpuobj *ramfc = base->ramfc;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_channel_init(&chan->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
|
||||
nv50_fifo_playlist_update(fifo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)object->engine;
|
||||
struct nv50_fifo_chan *chan = (void *)object;
|
||||
struct nvkm_device *device = fifo->base.engine.subdev.device;
|
||||
u32 chid = chan->base.chid;
|
||||
|
||||
/* remove channel from playlist, fifo will unload context */
|
||||
nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
|
||||
nv50_fifo_playlist_update(fifo);
|
||||
nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
|
||||
|
||||
return nvkm_fifo_channel_fini(&chan->base, suspend);
|
||||
}
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
nv50_fifo_ofuncs_dma = {
|
||||
.ctor = nv50_fifo_chan_ctor_dma,
|
||||
.dtor = nv50_fifo_chan_dtor,
|
||||
.init = nv50_fifo_chan_init,
|
||||
.fini = nv50_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
static struct nvkm_ofuncs
|
||||
nv50_fifo_ofuncs_ind = {
|
||||
.ctor = nv50_fifo_chan_ctor_ind,
|
||||
.dtor = nv50_fifo_chan_dtor,
|
||||
.init = nv50_fifo_chan_init,
|
||||
.fini = nv50_fifo_chan_fini,
|
||||
.map = _nvkm_fifo_channel_map,
|
||||
.rd32 = _nvkm_fifo_channel_rd32,
|
||||
.wr32 = _nvkm_fifo_channel_wr32,
|
||||
.ntfy = _nvkm_fifo_channel_ntfy
|
||||
};
|
||||
|
||||
static struct nvkm_oclass
|
||||
nv50_fifo_sclass[] = {
|
||||
{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
|
||||
{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
|
||||
{}
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* FIFO context - basically just the instmem reserved for the channel
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = nv_engine(engine)->subdev.device;
|
||||
struct nv50_fifo_base *base;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
|
||||
0x1000, NVOBJ_FLAG_HEAP, &base);
|
||||
*pobject = nv_object(base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, &base->base.gpuobj,
|
||||
&base->ramfc);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x1200, 0, true, &base->base.gpuobj,
|
||||
&base->eng);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
|
||||
&base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv50_fifo_context_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo_base *base = (void *)object;
|
||||
nvkm_vm_ref(NULL, &base->vm, base->pgd);
|
||||
nvkm_gpuobj_del(&base->pgd);
|
||||
nvkm_gpuobj_del(&base->eng);
|
||||
nvkm_gpuobj_del(&base->ramfc);
|
||||
nvkm_gpuobj_del(&base->cache);
|
||||
nvkm_fifo_context_destroy(&base->base);
|
||||
}
|
||||
|
||||
static struct nvkm_oclass
|
||||
nv50_fifo_cclass = {
|
||||
.handle = NV_ENGCTX(FIFO, 0x50),
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = nv50_fifo_context_ctor,
|
||||
.dtor = nv50_fifo_context_dtor,
|
||||
.init = _nvkm_fifo_context_init,
|
||||
.fini = _nvkm_fifo_context_fini,
|
||||
.rd32 = _nvkm_fifo_context_rd32,
|
||||
.wr32 = _nvkm_fifo_context_wr32,
|
||||
},
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
* PFIFO engine
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = (void *)parent;
|
||||
struct nv50_fifo *fifo;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &fifo);
|
||||
*pobject = nv_object(fifo);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
|
||||
false, &fifo->playlist[0]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
|
||||
false, &fifo->playlist[1]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_subdev(fifo)->unit = 0x00000100;
|
||||
nv_subdev(fifo)->intr = nv04_fifo_intr;
|
||||
nv_engine(fifo)->cclass = &nv50_fifo_cclass;
|
||||
nv_engine(fifo)->sclass = nv50_fifo_sclass;
|
||||
fifo->base.pause = nv04_fifo_pause;
|
||||
fifo->base.start = nv04_fifo_start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv50_fifo_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)object;
|
||||
|
||||
nvkm_memory_del(&fifo->playlist[1]);
|
||||
nvkm_memory_del(&fifo->playlist[0]);
|
||||
|
||||
nvkm_fifo_destroy(&fifo->base);
|
||||
}
|
||||
|
||||
int
|
||||
nv50_fifo_init(struct nvkm_object *object)
|
||||
{
|
||||
@ -534,7 +75,7 @@ nv50_fifo_init(struct nvkm_object *object)
|
||||
|
||||
for (i = 0; i < 128; i++)
|
||||
nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
|
||||
nv50_fifo_playlist_update_locked(fifo);
|
||||
nv50_fifo_runlist_update_locked(fifo);
|
||||
|
||||
nvkm_wr32(device, 0x003200, 0x00000001);
|
||||
nvkm_wr32(device, 0x003250, 0x00000001);
|
||||
@ -542,6 +83,50 @@ nv50_fifo_init(struct nvkm_object *object)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv50_fifo_dtor(struct nvkm_object *object)
|
||||
{
|
||||
struct nv50_fifo *fifo = (void *)object;
|
||||
|
||||
nvkm_memory_del(&fifo->runlist[1]);
|
||||
nvkm_memory_del(&fifo->runlist[0]);
|
||||
|
||||
nvkm_fifo_destroy(&fifo->base);
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_device *device = (void *)parent;
|
||||
struct nv50_fifo *fifo;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &fifo);
|
||||
*pobject = nv_object(fifo);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
|
||||
false, &fifo->runlist[0]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
|
||||
false, &fifo->runlist[1]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nv_subdev(fifo)->unit = 0x00000100;
|
||||
nv_subdev(fifo)->intr = nv04_fifo_intr;
|
||||
nv_engine(fifo)->cclass = &nv50_fifo_cclass;
|
||||
nv_engine(fifo)->sclass = nv50_fifo_sclass;
|
||||
fifo->base.pause = nv04_fifo_pause;
|
||||
fifo->base.start = nv04_fifo_start;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass *
|
||||
nv50_fifo_oclass = &(struct nvkm_oclass) {
|
||||
.handle = NV_ENGINE(FIFO, 0x50),
|
||||
|
@@ -1,36 +1,14 @@
#ifndef __NV50_FIFO_H__
#define __NV50_FIFO_H__
#include <engine/fifo.h>
#include "priv.h"

struct nv50_fifo {
	struct nvkm_fifo base;
	struct nvkm_memory *playlist[2];
	int cur_playlist;
	struct nvkm_memory *runlist[2];
	int cur_runlist;
};

struct nv50_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *ramfc;
	struct nvkm_gpuobj *cache;
	struct nvkm_gpuobj *eng;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct nv50_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 subc[8];
	struct nvkm_ramht *ramht;
};

void nv50_fifo_playlist_update(struct nv50_fifo *);

void nv50_fifo_object_detach(struct nvkm_object *, int);
void nv50_fifo_chan_dtor(struct nvkm_object *);
int  nv50_fifo_chan_fini(struct nvkm_object *, bool);

void nv50_fifo_context_dtor(struct nvkm_object *);

void nv50_fifo_dtor(struct nvkm_object *);
int  nv50_fifo_init(struct nvkm_object *);
void nv50_fifo_runlist_update(struct nv50_fifo *);
#endif

drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h (new file, 8 lines)
@@ -0,0 +1,8 @@
#ifndef __NVKM_FIFO_PRIV_H__
#define __NVKM_FIFO_PRIV_H__
#include <engine/fifo.h>
#include <core/engctx.h>

void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
#endif
drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h (new file, 132 lines)
@ -0,0 +1,132 @@
|
||||
#ifndef __NV04_FIFO_REGS_H__
|
||||
#define __NV04_FIFO_REGS_H__
|
||||
|
||||
#define NV04_PFIFO_DELAY_0 0x00002040
|
||||
#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
|
||||
#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
|
||||
#define NV03_PFIFO_INTR_0 0x00002100
|
||||
#define NV03_PFIFO_INTR_EN_0 0x00002140
|
||||
# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
|
||||
# define NV_PFIFO_INTR_RUNOUT (1<<4)
|
||||
# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
|
||||
# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
|
||||
# define NV_PFIFO_INTR_DMA_PT (1<<16)
|
||||
# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
|
||||
# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
#define NV03_PFIFO_RAMHT                           0x00002210
#define NV03_PFIFO_RAMFC                           0x00002214
#define NV03_PFIFO_RAMRO                           0x00002218
#define NV40_PFIFO_RAMFC                           0x00002220
#define NV03_PFIFO_CACHES                          0x00002500
#define NV04_PFIFO_MODE                            0x00002504
#define NV04_PFIFO_DMA                             0x00002508
#define NV04_PFIFO_SIZE                            0x0000250c
#define NV50_PFIFO_CTX_TABLE(c)                    (0x2600+(c)*4)
#define NV50_PFIFO_CTX_TABLE__SIZE                 128
#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED       (1<<31)
#define NV50_PFIFO_CTX_TABLE_UNK30_BAD             (1<<30)
#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80     0x0FFFFFFF
#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84     0x00FFFFFF
#define NV03_PFIFO_CACHE0_PUSH0                    0x00003000
#define NV03_PFIFO_CACHE0_PULL0                    0x00003040
#define NV04_PFIFO_CACHE0_PULL0                    0x00003050
#define NV04_PFIFO_CACHE0_PULL1                    0x00003054
#define NV03_PFIFO_CACHE1_PUSH0                    0x00003200
#define NV03_PFIFO_CACHE1_PUSH1                    0x00003204
#define NV03_PFIFO_CACHE1_PUSH1_DMA                (1<<8)
#define NV40_PFIFO_CACHE1_PUSH1_DMA                (1<<16)
#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK          0x0000000f
#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK          0x0000001f
#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK          0x0000007f
#define NV03_PFIFO_CACHE1_PUT                      0x00003210
#define NV04_PFIFO_CACHE1_DMA_PUSH                 0x00003220
#define NV04_PFIFO_CACHE1_DMA_FETCH                0x00003224
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES    0x00000000
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES   0x00000008
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES   0x00000010
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES   0x00000018
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES   0x00000020
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES   0x00000028
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES   0x00000030
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES   0x00000038
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES   0x00000040
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES   0x00000048
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES   0x00000050
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES   0x00000058
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES  0x00000060
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES  0x00000068
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES  0x00000070
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES  0x00000078
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES  0x00000080
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES  0x00000088
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES  0x00000090
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES  0x00000098
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES  0x000000A0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES  0x000000A8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES  0x000000B0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES  0x000000B8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES  0x000000C0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES  0x000000C8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES  0x000000D0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES  0x000000D8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES  0x000000E0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES  0x000000E8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES  0x000000F0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES  0x000000F8
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE            0x0000E000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES   0x00000000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES   0x00002000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES   0x00004000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES  0x00006000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES  0x00008000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES  0x0000A000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES  0x0000C000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES  0x0000E000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS        0x001F0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0      0x00000000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1      0x00010000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2      0x00020000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3      0x00030000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4      0x00040000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5      0x00050000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6      0x00060000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7      0x00070000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8      0x00080000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9      0x00090000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10     0x000A0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11     0x000B0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12     0x000C0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13     0x000D0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14     0x000E0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15     0x000F0000
# define NV_PFIFO_CACHE1_ENDIAN                    0x80000000
# define NV_PFIFO_CACHE1_LITTLE_ENDIAN             0x7FFFFFFF
# define NV_PFIFO_CACHE1_BIG_ENDIAN                0x80000000
#define NV04_PFIFO_CACHE1_DMA_STATE                0x00003228
#define NV04_PFIFO_CACHE1_DMA_INSTANCE             0x0000322c
#define NV04_PFIFO_CACHE1_DMA_CTL                  0x00003230
#define NV04_PFIFO_CACHE1_DMA_PUT                  0x00003240
#define NV04_PFIFO_CACHE1_DMA_GET                  0x00003244
#define NV10_PFIFO_CACHE1_REF_CNT                  0x00003248
#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE           0x0000324C
#define NV03_PFIFO_CACHE1_PULL0                    0x00003240
#define NV04_PFIFO_CACHE1_PULL0                    0x00003250
# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED       0x00000010
# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY         0x00001000
#define NV03_PFIFO_CACHE1_PULL1                    0x00003250
#define NV04_PFIFO_CACHE1_PULL1                    0x00003254
#define NV04_PFIFO_CACHE1_HASH                     0x00003258
#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT          0x00003260
#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP        0x00003264
#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE            0x00003268
#define NV10_PFIFO_CACHE1_SEMAPHORE                0x0000326C
#define NV03_PFIFO_CACHE1_GET                      0x00003270
#define NV04_PFIFO_CACHE1_ENGINE                   0x00003280
#define NV04_PFIFO_CACHE1_DMA_DCOUNT               0x000032A0
#define NV40_PFIFO_GRCTX_INSTANCE                  0x000032E0
#define NV40_PFIFO_UNK32E4                         0x000032E4
#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
#endif
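The CACHE1_DMA_FETCH field macros above are intended to be OR'd together into a single value written to NV04_PFIFO_CACHE1_DMA_FETCH. A minimal usage sketch follows; it is not part of this commit, the example_set_dma_fetch() function and the wr32() callback are hypothetical stand-ins for the driver's own MMIO accessors, and the chosen field values are illustrative only.

/* Sketch: select the DMA fetch trigger threshold, fetch size and maximum
 * number of outstanding requests with one register write. */
static void
example_set_dma_fetch(void (*wr32)(u32 addr, u32 data))
{
	wr32(NV04_PFIFO_CACHE1_DMA_FETCH,
	     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
	     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
	     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
}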
@ -26,6 +26,7 @@

#include <core/client.h>
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>

@ -26,6 +26,7 @@

#include <core/client.h>
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
#include <subdev/fb.h>

struct pipe_state {

@ -3,6 +3,7 @@

#include <core/client.h>
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

@ -2,6 +2,7 @@
#include "regs.h"

#include <engine/fifo.h>
#include <engine/fifo/chan.h>

/*******************************************************************************
 * Graphics object classes

@ -2,6 +2,7 @@
#include "regs.h"

#include <engine/fifo.h>
#include <engine/fifo/chan.h>

/*******************************************************************************
 * PGRAPH context

@ -2,6 +2,7 @@
#include "regs.h"

#include <engine/fifo.h>
#include <engine/fifo/chan.h>
#include <subdev/fb.h>

/*******************************************************************************

@ -2,6 +2,7 @@
#include "regs.h"

#include <engine/fifo.h>
#include <engine/fifo/chan.h>

/*******************************************************************************
 * Graphics object classes

@ -2,6 +2,7 @@
#include "regs.h"

#include <engine/fifo.h>
#include <engine/fifo/chan.h>

/*******************************************************************************
 * Graphics object classes