nouveau/gsp: move to 535.113.01
drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"

#include <core/gpuobj.h>
#include <subdev/gsp.h>
#include <subdev/mmu.h>
#include <subdev/vfn.h>
#include <engine/gr.h>

#include <nvhw/drf.h>

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>

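/* The doorbell handle encodes the runlist ID in the upper 16 bits and
 * the channel ID in the lower 16.
 */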
static u32
r535_chan_doorbell_handle(struct nvkm_chan *chan)
{
        return (chan->cgrp->runl->id << 16) | chan->id;
}

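/* Channel scheduling is handled by GSP-RM, so the start/stop hooks have
 * nothing to do host-side.
 */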
static void
r535_chan_stop(struct nvkm_chan *chan)
{
}

static void
r535_chan_start(struct nvkm_chan *chan)
{
}

static void
r535_chan_ramfc_clear(struct nvkm_chan *chan)
{
        struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;

        nvkm_gsp_rm_free(&chan->rm.object);

        dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size,
                          chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr);

        nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx);
}

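/* RM groups USERD into pages of CHID_PER_USERD entries, so both channel
 * IDs (r535_chan_id_get()) and the USERD index passed to RM are managed
 * in groups of the same size.
 */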
#define CHID_PER_USERD 8

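/* Allocate and configure the channel on the GSP-RM side.  The GPFIFO
 * allocation parameters are posted first, followed by controls to bind
 * the channel to its engine and enable scheduling.
 */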
static int
r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
        struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
        struct nvkm_engn *engn;
        struct nvkm_device *device = fifo->engine.subdev.device;
        NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
        const int userd_p = chan->id / CHID_PER_USERD;
        const int userd_i = chan->id % CHID_PER_USERD;
        u32 eT = ~0;
        int ret;

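        /* Ensure GR has completed its one-time init before asking RM to
         * create the channel.
         */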
        if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
                ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
                if (ret)
                        return ret;
        }

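        /* Use the first engine on the runlist as the channel's target
         * engine type.
         */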
        nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
                eT = engn->id;
                break;
        }

        if (WARN_ON(eT == ~0))
                return -EINVAL;

        chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
                                                  fifo->rm.mthdbuf_size,
                                                  &chan->rm.mthdbuf.addr, GFP_KERNEL);
        if (!chan->rm.mthdbuf.ptr)
                return -ENOMEM;

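        /* Allocate the channel object under the VMM's RM device, with an
         * object handle derived from the channel ID.
         */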
        args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
                                     fifo->func->chan.user.oclass, sizeof(*args),
                                     &chan->rm.object);
        if (WARN_ON(IS_ERR(args)))
                return PTR_ERR(args);

        args->gpFifoOffset = offset;
        args->gpFifoEntries = length / 8;

        args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
        args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
        args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
        args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
        if (!priv)
                args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
        else
                args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
        args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
        args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);

        args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
        args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
        args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
        args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);

        args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
        args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
        args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
        args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
        args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
        args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
        args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
        args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
        args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);

        args->hVASpace = chan->vmm->rm.object.handle;
        args->engineType = eT;

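        /* Memory descriptors for the instance block, USERD, RAMFC and
         * method buffer.  The addressSpace/cacheAttrib values appear to
         * follow RM's NV_ADDRESS_SPACE/NV_MEMORY_* conventions (sysmem=1,
         * vidmem=2; uncached=0, cached=1): everything but the sysmem
         * method buffer lives in cached vidmem.
         */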
        args->instanceMem.base = chan->inst->addr;
        args->instanceMem.size = chan->inst->size;
        args->instanceMem.addressSpace = 2;
        args->instanceMem.cacheAttrib = 1;

        args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
        args->userdMem.size = fifo->func->chan.func->userd->size;
        args->userdMem.addressSpace = 2;
        args->userdMem.cacheAttrib = 1;

        args->ramfcMem.base = chan->inst->addr + 0;
        args->ramfcMem.size = 0x200;
        args->ramfcMem.addressSpace = 2;
        args->ramfcMem.cacheAttrib = 1;

        args->mthdbufMem.base = chan->rm.mthdbuf.addr;
        args->mthdbufMem.size = fifo->rm.mthdbuf_size;
        args->mthdbufMem.addressSpace = 1;
        args->mthdbufMem.cacheAttrib = 0;

        if (!priv)
                args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
        else
                args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
        args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
        args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);

        ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
        if (ret)
                return ret;

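        /* Bind the new channel to its target engine, then enable GPFIFO
         * scheduling on it.
         */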
        {
                NVA06F_CTRL_BIND_PARAMS *ctrl;

                ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
                                            NVA06F_CTRL_CMD_BIND, sizeof(*ctrl));
                if (WARN_ON(IS_ERR(ctrl)))
                        return PTR_ERR(ctrl);

                ctrl->engineType = eT;

                ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
                if (ret)
                        return ret;
        }

        {
                NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl;

                ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
                                            NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl));
                if (WARN_ON(IS_ERR(ctrl)))
                        return PTR_ERR(ctrl);

                ctrl->bEnable = 1;
                ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
        }

        return ret;
}

static const struct nvkm_chan_func_ramfc
r535_chan_ramfc = {
        .write = r535_chan_ramfc_write,
        .clear = r535_chan_ramfc_clear,
        .devm = 0xfff,
        .priv = true,
};

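/* One shared USERD allocation.  Each bit of 'used' marks a USERD entry
 * (and its corresponding channel ID) that has been handed to a channel.
 */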
struct r535_chan_userd {
        struct nvkm_memory *mem;
        struct nvkm_memory *map;
        int chid;
        u32 used;

        struct list_head head;
};

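/* Release this channel's USERD entry.  The shared block, and the channel
 * ID group backing it, are freed once the last entry is released.
 */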
static void
r535_chan_id_put(struct nvkm_chan *chan)
{
        struct nvkm_runl *runl = chan->cgrp->runl;
        struct nvkm_fifo *fifo = runl->fifo;
        struct r535_chan_userd *userd;

        mutex_lock(&fifo->userd.mutex);
        list_for_each_entry(userd, &fifo->userd.list, head) {
                if (userd->map == chan->userd.mem) {
                        u32 chid = chan->userd.base / chan->func->userd->size;

                        userd->used &= ~BIT(chid);
                        if (!userd->used) {
                                nvkm_memory_unref(&userd->map);
                                nvkm_memory_unref(&userd->mem);
                                nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
                                list_del(&userd->head);
                                kfree(userd);
                        }

                        break;
                }
        }
        mutex_unlock(&fifo->userd.mutex);
}

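/* Allocate a channel ID.  A single ID from the chid allocator covers a
 * whole USERD block of CHID_PER_USERD entries, so the returned ID is
 * userd->chid * CHID_PER_USERD plus the entry's index within its block.
 */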
static int
r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
{
        const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
        struct nvkm_runl *runl = chan->cgrp->runl;
        struct nvkm_fifo *fifo = runl->fifo;
        struct r535_chan_userd *userd;
        u32 chid;
        int ret;

        if (ouserd + chan->func->userd->size >= userd_size ||
            (ouserd & (chan->func->userd->size - 1))) {
                RUNL_DEBUG(runl, "ouserd %llx", ouserd);
                return -EINVAL;
        }

        chid = div_u64(ouserd, chan->func->userd->size);

        list_for_each_entry(userd, &fifo->userd.list, head) {
                if (userd->mem == muserd) {
                        if (userd->used & BIT(chid))
                                return -EBUSY;
                        break;
                }
        }

        if (&userd->head == &fifo->userd.list) {
                if (nvkm_memory_size(muserd) < userd_size) {
                        RUNL_DEBUG(runl, "userd too small");
                        return -EINVAL;
                }

                userd = kzalloc(sizeof(*userd), GFP_KERNEL);
                if (!userd)
                        return -ENOMEM;

                userd->chid = nvkm_chid_get(runl->chid, chan);
                if (userd->chid < 0) {
                        ret = userd->chid;
                        kfree(userd);
                        return ret;
                }

                userd->mem = nvkm_memory_ref(muserd);

                ret = nvkm_memory_kmap(userd->mem, &userd->map);
                if (ret) {
                        nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
                        kfree(userd);
                        return ret;
                }

                list_add(&userd->head, &fifo->userd.list);
        }

        userd->used |= BIT(chid);

        chan->userd.mem = nvkm_memory_ref(userd->map);
        chan->userd.base = ouserd;

        return (userd->chid * CHID_PER_USERD) + chid;
}

static int
r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
{
        struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
        int ret;

        mutex_lock(&fifo->userd.mutex);
        ret = r535_chan_id_get_locked(chan, muserd, ouserd);
        mutex_unlock(&fifo->userd.mutex);
        return ret;
}

static const struct nvkm_chan_func
r535_chan = {
        .id_get = r535_chan_id_get,
        .id_put = r535_chan_id_put,
        .inst = &gf100_chan_inst,
        .userd = &gv100_chan_userd,
        .ramfc = &r535_chan_ramfc,
        .start = r535_chan_start,
        .stop = r535_chan_stop,
        .doorbell_handle = r535_chan_doorbell_handle,
};

static const struct nvkm_cgrp_func
r535_cgrp = {
};

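/* Query GSP-RM for the interrupt vector to use for this engine's
 * non-stalling interrupts.
 */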
static int
r535_engn_nonstall(struct nvkm_engn *engn)
{
        struct nvkm_subdev *subdev = &engn->engine->subdev;
        int ret;

        ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
        WARN_ON(ret < 0);
        return ret;
}

static const struct nvkm_engn_func
r535_ce = {
        .nonstall = r535_engn_nonstall,
};

static int
r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
{
        /* RM requires GR context buffers to remain mapped until after the
         * channel has been destroyed (as opposed to after the last gr obj
         * has been deleted).
         *
         * Take an extra ref here, which will be released once the channel
         * object has been deleted.
         */
        refcount_inc(&vctx->refs);
        chan->rm.grctx = vctx;
        return 0;
}

static const struct nvkm_engn_func
r535_gr = {
        .nonstall = r535_engn_nonstall,
        .ctor2 = r535_gr_ctor,
};

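/* Register ("promote") a context buffer with GSP-RM so that it can be
 * used by the engine on the given channel.
 */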
static int
r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
{
        struct nvkm_gsp_client *client = &chan->vmm->rm.client;
        NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;

        ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice,
                                    NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
        if (IS_ERR(ctrl))
                return PTR_ERR(ctrl);

        ctrl->hClient = client->object.handle;
        ctrl->hObject = chan->rm.object.handle;
        ctrl->hChanClient = client->object.handle;
        ctrl->virtAddress = vctx->vma->addr;
        ctrl->size = vctx->inst->size;
        ctrl->engineType = engn->id;
        ctrl->ChID = chan->id;

        return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl);
}

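/* Allocate a falcon engine's context buffer, sized from RM's constructed
 * falcon info (r535_fifo_ectx_size()), map it into the channel's address
 * space, and promote it to RM.
 */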
static int
r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
{
        int ret;

        if (WARN_ON(!engn->rm.size))
                return -EINVAL;

        ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL,
                              &vctx->inst);
        if (ret)
                return ret;

        ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
        if (ret)
                return ret;

        ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
        if (ret)
                return ret;

        return r535_flcn_bind(engn, vctx, chan);
}

static const struct nvkm_engn_func
r535_flcn = {
        .nonstall = r535_engn_nonstall,
        .ctor2 = r535_flcn_ctor,
};

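/* Runlist submission is likewise handled by GSP-RM; nothing to do in
 * the block/allow hooks.
 */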
static void
r535_runl_allow(struct nvkm_runl *runl, u32 engm)
{
}

static void
r535_runl_block(struct nvkm_runl *runl, u32 engm)
{
}

static const struct nvkm_runl_func
r535_runl = {
        .block = r535_runl_block,
        .allow = r535_runl_allow,
};

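/* Translate an NVKM engine type/instance to its NV2080_ENGINE_TYPE. */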
static int
r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
{
        switch (type) {
        case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
        case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
        case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
        case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
        case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
        case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
        case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
        case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
        default:
                break;
        }

        WARN_ON(1);
        return -EINVAL;
}

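/* Translate an RM_ENGINE_TYPE to an NVKM engine type, returning the
 * engine instance, or -EINVAL for types not supported here.
 */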
static int
r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
{
        switch (rm) {
        case RM_ENGINE_TYPE_GR0:
                *ptype = NVKM_ENGINE_GR;
                return 0;
        case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
                *ptype = NVKM_ENGINE_CE;
                return rm - RM_ENGINE_TYPE_COPY0;
        case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
                *ptype = NVKM_ENGINE_NVDEC;
                return rm - RM_ENGINE_TYPE_NVDEC0;
        case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
                *ptype = NVKM_ENGINE_NVENC;
                return rm - RM_ENGINE_TYPE_NVENC0;
        case RM_ENGINE_TYPE_SW:
                *ptype = NVKM_ENGINE_SW;
                return 0;
        case RM_ENGINE_TYPE_SEC2:
                *ptype = NVKM_ENGINE_SEC2;
                return 0;
        case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
                *ptype = NVKM_ENGINE_NVJPG;
                return rm - RM_ENGINE_TYPE_NVJPEG0;
        case RM_ENGINE_TYPE_OFA:
                *ptype = NVKM_ENGINE_OFA;
                return 0;
        default:
                return -EINVAL;
        }
}

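/* Query the context buffer size of each falcon RM has constructed, and
 * record it on the matching engine (keyed by RM's engine descriptor).
 */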
static int
r535_fifo_ectx_size(struct nvkm_fifo *fifo)
{
        NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
        struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
        struct nvkm_runl *runl;
        struct nvkm_engn *engn;

        ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
                                   NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO,
                                   sizeof(*ctrl));
        if (WARN_ON(IS_ERR(ctrl)))
                return PTR_ERR(ctrl);

        for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
                nvkm_runl_foreach(runl, fifo) {
                        nvkm_runl_foreach_engn(engn, runl) {
                                if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
                                        engn->rm.size =
                                                ctrl->constructedFalconsTable[i].ctxBufferSize;
                                        break;
                                }
                        }
                }
        }

        nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
        return 0;
}

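/* Build the runlist/engine topology from RM's device info table.  The
 * channel ID allocator holds 2048 / CHID_PER_USERD entries, as each ID
 * covers a whole USERD page (see r535_chan_id_get()).
 */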
static int
r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->engine.subdev;
        struct nvkm_gsp *gsp = subdev->device->gsp;
        struct nvkm_runl *runl;
        struct nvkm_engn *engn;
        u32 cgids = 2048;
        u32 chids = 2048 / CHID_PER_USERD;
        int ret;
        NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;

        if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
            (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
                return ret;

        ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
                                   NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
        if (WARN_ON(IS_ERR(ctrl)))
                return PTR_ERR(ctrl);

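        /* First pass: create a runlist object for each unique runlist
         * in the device info table.
         */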
        for (int i = 0; i < ctrl->numEntries; i++) {
                const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
                const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];

                runl = nvkm_runl_get(fifo, id, addr);
                if (!runl) {
                        runl = nvkm_runl_new(fifo, id, addr, 0);
                        if (WARN_ON(IS_ERR(runl)))
                                continue;
                }
        }

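        /* Second pass: attach engines to their runlists, deleting any
         * runlist whose engine type isn't supported here.
         */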
        for (int i = 0; i < ctrl->numEntries; i++) {
                const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
                const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
                const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
                enum nvkm_subdev_type type;
                int inst, nv2080;

                runl = nvkm_runl_get(fifo, id, addr);
                if (!runl)
                        continue;

                inst = r535_fifo_engn_type(rmid, &type);
                if (inst < 0) {
                        nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
                        nvkm_runl_del(runl);
                        continue;
                }

                nv2080 = r535_fifo_2080_type(type, inst);
                if (nv2080 < 0) {
                        nvkm_runl_del(runl);
                        continue;
                }

                switch (type) {
                case NVKM_ENGINE_CE:
                        engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
                        break;
                case NVKM_ENGINE_GR:
                        engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
                        break;
                case NVKM_ENGINE_NVDEC:
                case NVKM_ENGINE_NVENC:
                case NVKM_ENGINE_NVJPG:
                case NVKM_ENGINE_OFA:
                        engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst);
                        break;
                case NVKM_ENGINE_SW:
                        continue;
                default:
                        engn = NULL;
                        break;
                }

                if (!engn) {
                        nvkm_runl_del(runl);
                        continue;
                }

                engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC];
        }

        nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);

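        /* Ask RM how large each channel's CE fault method buffer needs
         * to be; r535_chan_ramfc_write() allocates one per channel.
         */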
        {
                NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl;

                ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
                                           NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE,
                                           sizeof(*ctrl));
                if (IS_ERR(ctrl))
                        return PTR_ERR(ctrl);

                fifo->rm.mthdbuf_size = ctrl->size;

                nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
        }

        return r535_fifo_ectx_size(fifo);
}

static void
r535_fifo_dtor(struct nvkm_fifo *fifo)
{
        kfree(fifo->func);
}

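/* Create the FIFO, copying the cgrp/chan descriptions from the HW
 * implementation, but with function pointers routed through GSP-RM.
 */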
int
r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
              enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
        struct nvkm_fifo_func *rm;

        if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
                return -ENOMEM;

        rm->dtor = r535_fifo_dtor;
        rm->runl_ctor = r535_fifo_runl_ctor;
        rm->runl = &r535_runl;
        rm->cgrp = hw->cgrp;
        rm->cgrp.func = &r535_cgrp;
        rm->chan = hw->chan;
        rm->chan.func = &r535_chan;
        rm->nonstall = &ga100_fifo_nonstall;
        rm->nonstall_ctor = ga100_fifo_nonstall_ctor;

        return nvkm_fifo_new_(rm, device, type, inst, pfifo);
}