Merge tag 'topic/nvidia-gsp-2023-11-03' of git://anongit.freedesktop.org/drm/drm
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 4 Nov 2023 20:42:07 +0000 (10:42 -1000)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 4 Nov 2023 20:42:07 +0000 (10:42 -1000)
Pull drm nouveau GSP support from Dave Airlie:
 "This adds the initial support for the NVIDIA GSP firmware to nouveau.

  This firmware is a new direction for Turing+ GPUs, and is only enabled
  by default on the Ada generation. Other generations need to opt in
  with nouveau.config=NvGspRm=1

  The GSP firmware moves nearly all of the GPU init and power management
  tasks onto a RISC-V coprocessor on the GPU itself.

  This series is mostly the work of Ben Skeggs; Dave added some patches
  to rebase it onto the latest firmware release, which is where we will
  stay for as long as possible since the firmware has no ABI stability"
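
For reference, GSP-RM is opted into on non-Ada Turing+ boards with the
nouveau "config" module option mentioned above; a minimal sketch (the
modprobe.d file name is illustrative, not part of this series):

  nouveau.config=NvGspRm=1            # on the kernel command line

  options nouveau config=NvGspRm=1    # e.g. in /etc/modprobe.d/nouveau-gsp.conf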

* tag 'topic/nvidia-gsp-2023-11-03' of git://anongit.freedesktop.org/drm/drm: (49 commits)
  nouveau/gsp: add some basic registry entries.
  nouveau/gsp: fix message signature.
  nouveau/gsp: move to 535.113.01
  nouveau/disp: fix post-gsp build on 32-bit arm.
  nouveau: fix r535 build on 32-bit arm.
  drm/nouveau/ofa/r535: initial support
  drm/nouveau/nvjpg/r535: initial support
  drm/nouveau/nvenc/r535: initial support
  drm/nouveau/nvdec/r535: initial support
  drm/nouveau/gr/r535: initial support
  drm/nouveau/ce/r535: initial support
  drm/nouveau/fifo/r535: initial support
  drm/nouveau/disp/r535: initial support
  drm/nouveau/mmu/r535: initial support
  drm/nouveau/gsp/r535: add interrupt handling
  drm/nouveau/gsp/r535: add support for rm alloc
  drm/nouveau/gsp/r535: add support for rm control
  drm/nouveau/gsp/r535: add support for booting GSP-RM
  drm/nouveau/nvkm: support loading fws into sg_table
  drm/nouveau/kms/tu102-: disable vbios parsing when running on RM
  ...

230 files changed:
drivers/gpu/drm/nouveau/dispnv50/core.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nvif/disp.c
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c
drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c
drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c

index abefc2343443bef6895f95a35ac08cde64d273ca..f045515696cbbc8b86f45c3f3319a76690b25ad6 100644 (file)
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
                int version;
                int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
        } cores[] = {
+               { AD102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
index a0ac8c258d9ff118c19e6d4d65e3dc5602acfd2d..7840b6428afbe468b2ad51ac2f59c3fc2c77a3e1 100644 (file)
@@ -1592,6 +1592,148 @@ nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
        nv_encoder->crtc = NULL;
 }
 
+// common/inc/displayport/displayport.h
+#define DP_CONFIG_WATERMARK_ADJUST                   2
+#define DP_CONFIG_WATERMARK_LIMIT                   20
+#define DP_CONFIG_INCREASED_WATERMARK_ADJUST         8
+#define DP_CONFIG_INCREASED_WATERMARK_LIMIT         22
+
+static bool
+nv50_sor_dp_watermark_sst(struct nouveau_encoder *outp,
+                         struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       bool enhancedFraming = outp->dp.dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP;
+       u64 minRate = outp->dp.link_bw * 1000;
+       unsigned tuSize = 64;
+       unsigned waterMark;
+       unsigned hBlankSym;
+       unsigned vBlankSym;
+       unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST;
+       unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT;
+       // depth is multiplied by 16 in case of DSC enable
+       s32 hblank_symbols;
+       // number of link clocks per line.
+       int vblank_symbols        = 0;
+       bool bEnableDsc = false;
+       unsigned surfaceWidth = asyh->mode.h.blanks - asyh->mode.h.blanke;
+       unsigned rasterWidth = asyh->mode.h.active;
+       unsigned depth = asyh->or.bpc * 3;
+       unsigned DSC_FACTOR = bEnableDsc ? 16 : 1;
+       u64 pixelClockHz = asyh->mode.clock * 1000;
+       u64 PrecisionFactor = 100000, ratioF, watermarkF;
+       u32 numLanesPerLink = outp->dp.link_nr;
+       u32 numSymbolsPerLine;
+       u32 BlankingBits;
+       u32 surfaceWidthPerLink;
+       u32 PixelSteeringBits;
+       u64 NumBlankingLinkClocks;
+       u32 MinHBlank;
+
+       if (outp->outp.info.dp.increased_wm) {
+               watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST;
+               watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT;
+       }
+
+       if ((pixelClockHz * depth) >= (8 * minRate * outp->dp.link_nr * DSC_FACTOR))
+       {
+               return false;
+       }
+
+       //
+       // For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with
+       // 0 active symbols. This may cause HW hang. Bug 200379426
+       //
+       if ((bEnableDsc) &&
+           ((pixelClockHz * depth) < div_u64(8 * minRate * outp->dp.link_nr * DSC_FACTOR, 64)))
+       {
+               return false;
+       }
+
+       //
+       //  Perform the SST calculation.
+       //      For auto mode the watermark calculation does not need to track accumulated error;
+       //      the formulas for manual mode will not work, so the calculation below was extracted from the DTB.
+       //
+       ratioF = div_u64((u64)pixelClockHz * depth * PrecisionFactor, DSC_FACTOR);
+
+       ratioF = div_u64(ratioF, 8 * (u64) minRate * outp->dp.link_nr);
+
+       if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below
+               return false;
+
+       watermarkF = div_u64(ratioF * tuSize * (PrecisionFactor - ratioF), PrecisionFactor);
+       waterMark = (unsigned)(watermarkAdjust + (div_u64(2 * div_u64(depth * PrecisionFactor, 8 * numLanesPerLink * DSC_FACTOR) + watermarkF, PrecisionFactor)));
+
+       //
+       //  Bounds check the watermark
+       //
+       numSymbolsPerLine = div_u64(surfaceWidth * depth, 8 * outp->dp.link_nr * DSC_FACTOR);
+
+       if (WARN_ON(waterMark > 39 || waterMark > numSymbolsPerLine))
+               return false;
+
+       //
+       //  Clamp the low side
+       //
+       if (waterMark < watermarkMinimum)
+               waterMark = watermarkMinimum;
+
+       //Bits to send BS/BE/Extra symbols due to pixel padding
+       //Also accounts for enhanced framing.
+       BlankingBits = 3*8*numLanesPerLink + (enhancedFraming ? 3*8*numLanesPerLink : 0);
+
+       //VBID/MVID/MAUD sent 4 times all the time
+       BlankingBits += 3*8*4;
+
+       surfaceWidthPerLink = surfaceWidth;
+
+       //Extra bits sent due to pixel steering
+       u32 remain;
+       div_u64_rem(surfaceWidthPerLink, numLanesPerLink, &remain);
+       PixelSteeringBits = remain ? div_u64((numLanesPerLink - remain) * depth, DSC_FACTOR) : 0;
+
+       BlankingBits += PixelSteeringBits;
+       NumBlankingLinkClocks = div_u64((u64)BlankingBits * PrecisionFactor, (8 * numLanesPerLink));
+       MinHBlank = (u32)(div_u64(div_u64(NumBlankingLinkClocks * pixelClockHz, minRate), PrecisionFactor));
+       MinHBlank += 12;
+
+       if (WARN_ON(MinHBlank > rasterWidth - surfaceWidth))
+               return false;
+
+       // Bug 702290 - Active Width should be greater than 60
+       if (WARN_ON(surfaceWidth <= 60))
+               return false;
+
+
+       hblank_symbols = (s32)(div_u64((u64)(rasterWidth - surfaceWidth - MinHBlank) * minRate, pixelClockHz));
+
+       //reduce HBlank Symbols to account for secondary data packet
+       hblank_symbols -= 1; //Stuffer latency to send BS
+       hblank_symbols -= 3; //SPKT latency to send data to stuffer
+
+       hblank_symbols -= numLanesPerLink == 1 ? 9  : numLanesPerLink == 2 ? 6 : 3;
+
+       hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols;
+
+       // Refer to dev_disp.ref for more information.
+       // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1;
+       // where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39
+       if (surfaceWidth < 40)
+       {
+               vblank_symbols = 0;
+       }
+       else
+       {
+               vblank_symbols = (s32)((div_u64((u64)(surfaceWidth - 40) * minRate, pixelClockHz))) - 1;
+
+               vblank_symbols -= numLanesPerLink == 1 ? 39  : numLanesPerLink == 2 ? 21 : 12;
+       }
+
+       vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols;
+
+       return nvif_outp_dp_sst(&outp->outp, head->base.index, waterMark, hBlankSym, vBlankSym);
+}
+
 static void
 nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
@@ -1679,6 +1821,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
                break;
        case DCB_OUTPUT_DP:
                nouveau_dp_train(nv_encoder, false, mode->clock, asyh->or.bpc);
+               nv50_sor_dp_watermark_sst(nv_encoder, head, asyh);
                depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
 
                if (nv_encoder->outp.or.link & 1)
index 8b5a240d57e473d106e8b40630a635ea804dad1f..fa161b74d96774ff76b05707c77e662640b87849 100644 (file)
@@ -35,6 +35,7 @@ struct nv_device_info_v0 {
 #define NV_DEVICE_INFO_V0_VOLTA                                            0x0b
 #define NV_DEVICE_INFO_V0_TURING                                           0x0c
 #define NV_DEVICE_INFO_V0_AMPERE                                           0x0d
+#define NV_DEVICE_INFO_V0_ADA                                              0x0e
        __u8  family;
        __u8  pad06[2];
        __u64 ram_size;
@@ -90,6 +91,8 @@ struct nv_device_time_v0 {
 #define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC2                          0x00004000
 #define NV_DEVICE_HOST_RUNLIST_ENGINES_NVDEC                         0x00008000
 #define NV_DEVICE_HOST_RUNLIST_ENGINES_NVENC                         0x00010000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVJPG                         0x00020000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_OFA                           0x00040000
 /* Returns the number of available channels on runlist(data). */
 #define NV_DEVICE_HOST_RUNLIST_CHANNELS               NV_DEVICE_HOST(0x00000101)
 #endif
index ad1e5de84e80f5081b0702ade33d6f9725b596f2..e668ab1664f0857d86bd142011d7427119d0adad 100644 (file)
 #define GV100_DISP                                    /* if0010.h */ 0x0000c370
 #define TU102_DISP                                    /* if0010.h */ 0x0000c570
 #define GA102_DISP                                    /* if0010.h */ 0x0000c670
+#define AD102_DISP                                    /* if0010.h */ 0x0000c770
 
 #define GV100_DISP_CAPS                                              0x0000c373
 
 #define GV100_DISP_CORE_CHANNEL_DMA                   /* if0014.h */ 0x0000c37d
 #define TU102_DISP_CORE_CHANNEL_DMA                   /* if0014.h */ 0x0000c57d
 #define GA102_DISP_CORE_CHANNEL_DMA                   /* if0014.h */ 0x0000c67d
+#define AD102_DISP_CORE_CHANNEL_DMA                   /* if0014.h */ 0x0000c77d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* if0014.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* if0014.h */ 0x0000827e
 
 #define AMPERE_B                                      /* cl9097.h */ 0x0000c797
 
+#define ADA_A                                         /* cl9097.h */ 0x0000c997
+
 #define NV74_BSP                                                     0x000074b0
 
+#define NVC4B0_VIDEO_DECODER                                         0x0000c4b0
+#define NVC6B0_VIDEO_DECODER                                         0x0000c6b0
+#define NVC7B0_VIDEO_DECODER                                         0x0000c7b0
+#define NVC9B0_VIDEO_DECODER                                         0x0000c9b0
+
 #define GT212_MSVLD                                                  0x000085b1
 #define IGT21A_MSVLD                                                 0x000086b1
 #define G98_MSVLD                                                    0x000088b1
 #define AMPERE_DMA_COPY_A                                            0x0000c6b5
 #define AMPERE_DMA_COPY_B                                            0x0000c7b5
 
+#define NVC4B7_VIDEO_ENCODER                                         0x0000c4b7
+#define NVC7B7_VIDEO_ENCODER                                         0x0000c7b7
+#define NVC9B7_VIDEO_ENCODER                                         0x0000c9b7
+
 #define FERMI_DECOMPRESS                                             0x000090b8
 
 #define NV50_COMPUTE                                                 0x000050c0
 #define VOLTA_COMPUTE_A                                              0x0000c3c0
 #define TURING_COMPUTE_A                                             0x0000c5c0
 #define AMPERE_COMPUTE_B                                             0x0000c7c0
+#define ADA_COMPUTE_A                                                0x0000c9c0
 
 #define NV74_CIPHER                                                  0x000074c1
+
+#define NVC4D1_VIDEO_NVJPG                                           0x0000c4d1
+#define NVC9D1_VIDEO_NVJPG                                           0x0000c9d1
+
+#define NVC6FA_VIDEO_OFA                                             0x0000c6fa
+#define NVC7FA_VIDEO_OFA                                             0x0000c7fa
+#define NVC9FA_VIDEO_OFA                                             0x0000c9fa
 #endif
index f65b5009acf75a39d5dc43440c28e7bc917fe61f..f057d348221e5adf8d8686f371d6ce12a08d1b0c 100644 (file)
@@ -46,6 +46,7 @@ struct nvkm_device {
                GV100    = 0x140,
                TU100    = 0x160,
                GA100    = 0x170,
+               AD100    = 0x190,
        } card_type;
        u32 chipset;
        u8  chiprev;
index b857cf142c4a3fcc81999b4585316792d5c932cc..3d3f1063aaa7c7e3ed49f6b637682867db26f463 100644 (file)
@@ -48,6 +48,8 @@ int nvkm_falcon_pio_rd(struct nvkm_falcon *, u8 port, enum nvkm_falcon_mem type,
                       const u8 *img, u32 img_base, int len);
 int nvkm_falcon_dma_wr(struct nvkm_falcon *, const u8 *img, u64 dma_addr, u32 dma_base,
                       enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec);
+bool nvkm_falcon_riscv_active(struct nvkm_falcon *);
+void nvkm_falcon_intr_retrigger(struct nvkm_falcon *);
 
 int gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
 int gm200_flcn_disable(struct nvkm_falcon *);
@@ -61,10 +63,15 @@ void gm200_flcn_tracepc(struct nvkm_falcon *);
 int gp102_flcn_reset_eng(struct nvkm_falcon *);
 extern const struct nvkm_falcon_func_pio gp102_flcn_emem_pio;
 
+bool tu102_flcn_riscv_active(struct nvkm_falcon *);
+
+void ga100_flcn_intr_retrigger(struct nvkm_falcon *);
+
 int ga102_flcn_select(struct nvkm_falcon *);
 int ga102_flcn_reset_prep(struct nvkm_falcon *);
 int ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
 extern const struct nvkm_falcon_func_dma ga102_flcn_dma;
+bool ga102_flcn_riscv_active(struct nvkm_falcon *);
 
 void nvkm_falcon_v1_load_imem(struct nvkm_falcon *,
                              void *, u32, u32, u16, u8, bool);
index d4e507e252b13d624a92293e13a4ee5d1b20a0aa..20839be72644be7f1e46093f9182e632e4cc4bd3 100644 (file)
@@ -10,6 +10,7 @@ struct nvkm_firmware {
                enum nvkm_firmware_type {
                        NVKM_FIRMWARE_IMG_RAM,
                        NVKM_FIRMWARE_IMG_DMA,
+                       NVKM_FIRMWARE_IMG_SGT,
                } type;
        } *func;
        const char *name;
@@ -21,7 +22,10 @@ struct nvkm_firmware {
 
        struct nvkm_firmware_mem {
                struct nvkm_memory memory;
-               struct scatterlist sgl;
+               union {
+                       struct scatterlist sgl; /* DMA */
+                       struct sg_table sgt;    /* SGT */
+               };
        } mem;
 };
 
index 58108dea5aeb69fd5fa9b3a5be0206c814b8992f..30c17db483cb53cd90cc121285f5dd2cc89a388f 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: MIT */
-NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP     , struct nvkm_top     ,      top)
 NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP     , struct nvkm_gsp     ,      gsp)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP     , struct nvkm_top     ,      top)
 NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VFN     , struct nvkm_vfn     ,      vfn)
 NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PCI     , struct nvkm_pci     ,      pci)
 NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VBIOS   , struct nvkm_bios    ,     bios)
@@ -42,9 +42,9 @@ NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSENC   , struct nvkm_engine  ,    msenc)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC  , struct nvkm_engine  ,   mspdec)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP   , struct nvkm_engine  ,    msppp)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD   , struct nvkm_engine  ,    msvld)
-NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC   , struct nvkm_nvdec   ,    nvdec, 5)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC   , struct nvkm_nvdec   ,    nvdec, 8)
 NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC   , struct nvkm_nvenc   ,    nvenc, 3)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_NVJPG   , struct nvkm_engine  ,    nvjpg)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG   , struct nvkm_engine  ,    nvjpg, 8)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA     , struct nvkm_engine  ,      ofa)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM      , struct nvkm_pm      ,       pm)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC     , struct nvkm_engine  ,      sec)
index ad9aef2df48f798750c8d4687985b86652f872ef..3e8db8280e2a4a32077cb1c38df58eac2a75eba8 100644 (file)
@@ -5,11 +5,29 @@
 #include <core/engine.h>
 #include <core/object.h>
 #include <core/event.h>
+#include <subdev/gsp.h>
 
 struct nvkm_disp {
        const struct nvkm_disp_func *func;
        struct nvkm_engine engine;
 
+       struct {
+               struct nvkm_gsp_client client;
+               struct nvkm_gsp_device device;
+
+               struct nvkm_gsp_object objcom;
+               struct nvkm_gsp_object object;
+
+#define NVKM_DPYID_PLUG   BIT(0)
+#define NVKM_DPYID_UNPLUG BIT(1)
+#define NVKM_DPYID_IRQ    BIT(2)
+               struct nvkm_event event;
+               struct nvkm_gsp_event hpd;
+               struct nvkm_gsp_event irq;
+
+               u32 assigned_sors;
+       } rm;
+
        struct list_head heads;
        struct list_head iors;
        struct list_head outps;
@@ -69,4 +87,5 @@ int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
 int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
 int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
 int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int ad102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
 #endif
index b7bb8a29a729784651725b142157875f4be4a57e..341f133dc38c22c3ebd1c7bf8d4b30cad715655b 100644 (file)
@@ -62,6 +62,7 @@ struct nvkm_falcon_func {
        int (*enable)(struct nvkm_falcon *);
        int (*select)(struct nvkm_falcon *);
        u32 addr2;
+       u32 riscv_irqmask;
        bool reset_pmc;
        int (*reset_eng)(struct nvkm_falcon *);
        int (*reset_prep)(struct nvkm_falcon *);
@@ -87,6 +88,9 @@ struct nvkm_falcon_func {
                u32 stride;
        } cmdq, msgq;
 
+       bool (*riscv_active)(struct nvkm_falcon *);
+       void (*intr_retrigger)(struct nvkm_falcon *);
+
        struct {
                u32 *data;
                u32  size;
index 221abd6c431037c22a53e4ef1404482dac8bbfc0..be508f65b2808f4576e36070458235c903e328ba 100644 (file)
@@ -4,6 +4,7 @@
 #include <core/engine.h>
 #include <core/object.h>
 #include <core/event.h>
+#include <subdev/gsp.h>
 struct nvkm_fault_data;
 
 #define NVKM_FIFO_ENGN_NR 16
@@ -35,6 +36,15 @@ struct nvkm_chan {
        atomic_t blocked;
        atomic_t errored;
 
+       struct {
+               struct nvkm_gsp_object object;
+               struct {
+                       dma_addr_t addr;
+                       void *ptr;
+               } mthdbuf;
+               struct nvkm_vctx *grctx;
+       } rm;
+
        struct list_head cctxs;
        struct list_head head;
 };
@@ -43,6 +53,8 @@ struct nvkm_chan *nvkm_chan_get_chid(struct nvkm_engine *, int id, unsigned long
 struct nvkm_chan *nvkm_chan_get_inst(struct nvkm_engine *, u64 inst, unsigned long *irqflags);
 void nvkm_chan_put(struct nvkm_chan **, unsigned long irqflags);
 
+struct nvkm_chan *nvkm_uchan_chan(struct nvkm_object *);
+
 struct nvkm_fifo {
        const struct nvkm_fifo_func *func;
        struct nvkm_engine engine;
@@ -66,8 +78,15 @@ struct nvkm_fifo {
        struct {
                struct nvkm_memory *mem;
                struct nvkm_vma *bar1;
+
+               struct mutex mutex;
+               struct list_head list;
        } userd;
 
+       struct {
+               u32 mthdbuf_size;
+       } rm;
+
        spinlock_t lock;
        struct mutex mutex;
 };
index a2333cfe6955c951c4e739fa014160da4b3c3e95..8145796ffc61bb8f220893415ff392b9dce45136 100644 (file)
@@ -55,4 +55,5 @@ int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
 int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
 int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
 int ga102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int ad102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
 #endif
index 9baf197ac8336f3a0b6c5d25125ce33981c24f0e..8d2e170883e1f364b68c474abbca2fffc6acbca6 100644 (file)
@@ -12,5 +12,8 @@ struct nvkm_nvdec {
 };
 
 int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
+int tu102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
+int ga100_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
 int ga102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
+int ad102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
 #endif
index 1a259c5c9a7140e12be1e5c130cc255fe9ea074e..018c58fc32ba61395233e4efa739f8a7c771ab9f 100644 (file)
@@ -12,4 +12,7 @@ struct nvkm_nvenc {
 };
 
 int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
+int tu102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
+int ga102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
+int ad102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
new file mode 100644 (file)
index 0000000..80b7933
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVJPG_H__
+#define __NVKM_NVJPG_H__
+#include <core/engine.h>
+
+int ga100_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int ad102_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
new file mode 100644 (file)
index 0000000..e72e211
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_OFA_H__
+#define __NVKM_OFA_H__
+#include <core/engine.h>
+
+int ga100_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int ga102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int ad102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+#endif
index 4f07836ab984e74636d8943b3f9e1700f054e2fb..874a5080ba0664bb54ff69d16b43894315d56251 100644 (file)
@@ -11,6 +11,10 @@ struct nvkm_bar {
        spinlock_t lock;
        bool bar2;
 
+       void __iomem *flushBAR2PhysMode;
+       struct nvkm_memory *flushFBZero;
+       void __iomem *flushBAR2;
+
        /* whether the BAR supports to be ioremapped WC or should be uncached */
        bool iomap_uncached;
 };
index b61cfb077533ac5c77a0cb9a3136db89e4ee138f..b4b7841e3b13fdd48f3960a615c3a98bf343aff1 100644 (file)
@@ -29,6 +29,7 @@ int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
 u8  nvbios_rd08(struct nvkm_bios *, u32 addr);
 u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
 u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
+void *nvbios_pointer(struct nvkm_bios *, u32 addr);
 
 int nvkm_bios_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bios **);
 #endif
index 1755b0df3cc1db62a8bb967bc88aebf52420b16c..5b798a1a313d8f80bc8d040088942e087e549695 100644 (file)
@@ -158,9 +158,9 @@ struct nvkm_ram {
        struct nvkm_ram_data target;
 };
 
-int
-nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
-            bool contig, bool back, struct nvkm_memory **);
+int nvkm_ram_wrap(struct nvkm_device *, u64 addr, u64 size, struct nvkm_memory **);
+int nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
+                bool contig, bool back, struct nvkm_memory **);
 
 struct nvkm_ram_func {
        u64 upper;
index 72619d7df73e7047d2ac9764ce0301d53d0e8ced..2fa0445d89280c6677e58ccafa8f91b5962b5f76 100644 (file)
 #define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
 #include <core/subdev.h>
 #include <core/falcon.h>
+#include <core/firmware.h>
+
+#define GSP_PAGE_SHIFT 12
+#define GSP_PAGE_SIZE  BIT(GSP_PAGE_SHIFT)
+
+struct nvkm_gsp_mem {
+       u32 size;
+       void *data;
+       dma_addr_t addr;
+};
+
+struct nvkm_gsp_radix3 {
+       struct nvkm_gsp_mem mem[3];
+};
+
+int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *);
+void nvkm_gsp_sg_free(struct nvkm_device *, struct sg_table *);
+
+typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
+
+struct nvkm_gsp_event;
+typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
 
 struct nvkm_gsp {
        const struct nvkm_gsp_func *func;
        struct nvkm_subdev subdev;
 
        struct nvkm_falcon falcon;
+
+       struct {
+               struct {
+                       const struct firmware *load;
+                       const struct firmware *unload;
+               } booter;
+               const struct firmware *bl;
+               const struct firmware *rm;
+       } fws;
+
+       struct nvkm_firmware fw;
+       struct nvkm_gsp_mem sig;
+       struct nvkm_gsp_radix3 radix3;
+
+       struct {
+               struct {
+                       struct {
+                               u64 addr;
+                               u64 size;
+                       } vga_workspace;
+                       u64 addr;
+                       u64 size;
+               } bios;
+               struct {
+                       struct {
+                               u64 addr;
+                               u64 size;
+                       } frts, boot, elf, heap;
+                       u64 addr;
+                       u64 size;
+               } wpr2;
+               struct {
+                       u64 addr;
+                       u64 size;
+               } heap;
+               u64 addr;
+               u64 size;
+
+               struct {
+                       u64 addr;
+                       u64 size;
+               } region[16];
+               int region_nr;
+               u32 rsvd_size;
+       } fb;
+
+       struct {
+               struct nvkm_falcon_fw load;
+               struct nvkm_falcon_fw unload;
+       } booter;
+
+       struct {
+               struct nvkm_gsp_mem fw;
+               u32 code_offset;
+               u32 data_offset;
+               u32 manifest_offset;
+               u32 app_version;
+       } boot;
+
+       struct nvkm_gsp_mem libos;
+       struct nvkm_gsp_mem loginit;
+       struct nvkm_gsp_mem logintr;
+       struct nvkm_gsp_mem logrm;
+       struct nvkm_gsp_mem rmargs;
+
+       struct nvkm_gsp_mem wpr_meta;
+
+       struct {
+               struct sg_table sgt;
+               struct nvkm_gsp_radix3 radix3;
+               struct nvkm_gsp_mem meta;
+       } sr;
+
+       struct {
+               struct nvkm_gsp_mem mem;
+
+               struct {
+                       int   nr;
+                       u32 size;
+                       u64 *ptr;
+               } ptes;
+
+               struct {
+                       u32  size;
+                       void *ptr;
+               } cmdq, msgq;
+       } shm;
+
+       struct nvkm_gsp_cmdq {
+               struct mutex mutex;
+               u32 cnt;
+               u32 seq;
+               u32 *wptr;
+               u32 *rptr;
+       } cmdq;
+
+       struct nvkm_gsp_msgq {
+               struct mutex mutex;
+               u32 cnt;
+               u32 *wptr;
+               u32 *rptr;
+               struct nvkm_gsp_msgq_ntfy {
+                       u32 fn;
+                       nvkm_gsp_msg_ntfy_func func;
+                       void *priv;
+               } ntfy[16];
+               int ntfy_nr;
+               struct work_struct work;
+       } msgq;
+
+       bool running;
+
+       /* Internal GSP-RM control handles. */
+       struct {
+               struct nvkm_gsp_client {
+                       struct nvkm_gsp_object {
+                               struct nvkm_gsp_client *client;
+                               struct nvkm_gsp_object *parent;
+                               u32 handle;
+                       } object;
+
+                       struct nvkm_gsp *gsp;
+
+                       struct list_head events;
+               } client;
+
+               struct nvkm_gsp_device {
+                       struct nvkm_gsp_object object;
+                       struct nvkm_gsp_object subdevice;
+               } device;
+       } internal;
+
+       struct {
+               enum nvkm_subdev_type type;
+               int inst;
+               u32 stall;
+               u32 nonstall;
+       } intr[32];
+       int intr_nr;
+
+       struct {
+               u64 rm_bar1_pdb;
+               u64 rm_bar2_pdb;
+       } bar;
+
+       struct {
+               u8 gpcs;
+               u8 tpcs;
+       } gr;
+
+       const struct nvkm_gsp_rm {
+               void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
+               void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
+               void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
+
+               void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
+               void *(*rm_ctrl_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+               void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
+
+               void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
+               void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+               void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv);
+
+               int (*rm_free)(struct nvkm_gsp_object *);
+
+               int (*client_ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *);
+               void (*client_dtor)(struct nvkm_gsp_client *);
+
+               int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
+               void (*device_dtor)(struct nvkm_gsp_device *);
+
+               int (*event_ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
+                                 nvkm_gsp_event_func, struct nvkm_gsp_event *);
+               void (*event_dtor)(struct nvkm_gsp_event *);
+       } *rm;
+
+       struct {
+               struct mutex mutex;
+               struct idr idr;
+       } client_id;
 };
 
+static inline bool
+nvkm_gsp_rm(struct nvkm_gsp *gsp)
+{
+       return gsp && (gsp->fws.rm || gsp->fw.img);
+}
+
+static inline void *
+nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+{
+       return gsp->rm->rpc_get(gsp, fn, argc);
+}
+
+static inline void *
+nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+       return gsp->rm->rpc_push(gsp, argv, wait, repc);
+}
+
+static inline void *
+nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+{
+       void *argv = nvkm_gsp_rpc_get(gsp, fn, argc);
+
+       if (IS_ERR_OR_NULL(argv))
+               return argv;
+
+       return nvkm_gsp_rpc_push(gsp, argv, true, argc);
+}
+
+static inline int
+nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+{
+       void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
+
+       if (IS_ERR(repv))
+               return PTR_ERR(repv);
+
+       return 0;
+}
+
+static inline void
+nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+       gsp->rm->rpc_done(gsp, repv);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
+{
+       return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+       return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_rd(struct nvkm_gsp_object *object, u32 cmd, u32 repc)
+{
+       void *argv = nvkm_gsp_rm_ctrl_get(object, cmd, repc);
+
+       if (IS_ERR(argv))
+               return argv;
+
+       return nvkm_gsp_rm_ctrl_push(object, argv, repc);
+}
+
+static inline int
+nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
+{
+       void *repv = nvkm_gsp_rm_ctrl_push(object, argv, 0);
+
+       if (IS_ERR(repv))
+               return PTR_ERR(repv);
+
+       return 0;
+}
+
+static inline void
+nvkm_gsp_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
+{
+       object->client->gsp->rm->rm_ctrl_done(object, repv);
+}
+
+static inline void *
+nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 argc,
+                     struct nvkm_gsp_object *object)
+{
+       struct nvkm_gsp_client *client = parent->client;
+       struct nvkm_gsp *gsp = client->gsp;
+       void *argv;
+
+       object->client = parent->client;
+       object->parent = parent;
+       object->handle = handle;
+
+       argv = gsp->rm->rm_alloc_get(object, oclass, argc);
+       if (IS_ERR_OR_NULL(argv)) {
+               object->client = NULL;
+               return argv;
+       }
+
+       return argv;
+}
+
+static inline void *
+nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+       void *repv = object->client->gsp->rm->rm_alloc_push(object, argv, repc);
+
+       if (IS_ERR(repv))
+               object->client = NULL;
+
+       return repv;
+}
+
+static inline int
+nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
+{
+       void *repv = nvkm_gsp_rm_alloc_push(object, argv, 0);
+
+       if (IS_ERR(repv))
+               return PTR_ERR(repv);
+
+       return 0;
+}
+
+static inline void
+nvkm_gsp_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
+{
+       object->client->gsp->rm->rm_alloc_done(object, repv);
+}
+
+static inline int
+nvkm_gsp_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 argc,
+                 struct nvkm_gsp_object *object)
+{
+       void *argv = nvkm_gsp_rm_alloc_get(parent, handle, oclass, argc, object);
+
+       if (IS_ERR_OR_NULL(argv))
+               return argv ? PTR_ERR(argv) : -EIO;
+
+       return nvkm_gsp_rm_alloc_wr(object, argv);
+}
+
+static inline int
+nvkm_gsp_rm_free(struct nvkm_gsp_object *object)
+{
+       if (object->client)
+               return object->client->gsp->rm->rm_free(object);
+
+       return 0;
+}
+
+static inline int
+nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+       if (WARN_ON(!gsp->rm))
+               return -ENOSYS;
+
+       return gsp->rm->client_ctor(gsp, client);
+}
+
+static inline void
+nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+       if (client->gsp)
+               client->gsp->rm->client_dtor(client);
+}
+
+static inline int
+nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+       return client->gsp->rm->device_ctor(client, device);
+}
+
+static inline void
+nvkm_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+       if (device->object.client)
+               device->object.client->gsp->rm->device_dtor(device);
+}
+
+static inline int
+nvkm_gsp_client_device_ctor(struct nvkm_gsp *gsp,
+                           struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+       int ret = nvkm_gsp_client_ctor(gsp, client);
+
+       if (ret == 0) {
+               ret = nvkm_gsp_device_ctor(client, device);
+               if (ret)
+                       nvkm_gsp_client_dtor(client);
+       }
+
+       return ret;
+}
+
+struct nvkm_gsp_event {
+       struct nvkm_gsp_device *device;
+       u32 id;
+       nvkm_gsp_event_func func;
+
+       struct nvkm_gsp_object object;
+
+       struct list_head head;
+};
+
+static inline int
+nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+                          nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+       return device->object.client->gsp->rm->event_ctor(device, handle, id, func, event);
+}
+
+static inline void
+nvkm_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+       struct nvkm_gsp_device *device = event->device;
+
+       if (device)
+               device->object.client->gsp->rm->event_dtor(event);
+}
+
+int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
+int nvkm_gsp_intr_nonstall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
+
 int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
 int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
 #endif
index 92a36ddfc29ffe66a14a2648c9dc8a156440ecb2..e10cbd9203ec55ea9dff9fe993bb78e8645ddfd7 100644 (file)
@@ -8,6 +8,8 @@ struct nvkm_instmem {
        const struct nvkm_instmem_func *func;
        struct nvkm_subdev subdev;
 
+       bool suspend;
+
        spinlock_t lock;
        struct list_head list;
        struct list_head boot;
@@ -22,6 +24,11 @@ struct nvkm_instmem {
        struct nvkm_ramht  *ramht;
        struct nvkm_memory *ramro;
        struct nvkm_memory *ramfc;
+
+       struct {
+               struct sg_table fbsr;
+               bool fbsr_valid;
+       } rm;
 };
 
 u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
index 2fd2f2433fc7d40068873380321f353d277cae76..935b1cacd528e8416259803c533d4988004cc59f 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef __NVKM_MMU_H__
 #define __NVKM_MMU_H__
 #include <core/subdev.h>
+#include <subdev/gsp.h>
 
 struct nvkm_vma {
        struct list_head head;
@@ -63,6 +64,16 @@ struct nvkm_vmm {
        void *nullp;
 
        bool replay;
+
+       struct {
+               u64 bar2_pdb;
+
+               struct nvkm_gsp_client client;
+               struct nvkm_gsp_device device;
+               struct nvkm_gsp_object object;
+
+               struct nvkm_vma *rsvd;
+       } rm;
 };
 
 int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h
new file mode 100644 (file)
index 0000000..7157c77
--- /dev/null
@@ -0,0 +1,170 @@
+#ifndef __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
+#define __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV_MEMORY_DESC_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32 addressSpace;
+    NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+#define NVOS04_FLAGS_CHANNEL_TYPE                                  1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL                         0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL                          0x00000001  // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL             0x00000002  // OBSOLETE
+
+#define NVOS04_FLAGS_VPR                                           2:2
+#define NVOS04_FLAGS_VPR_FALSE                                     0x00000000
+#define NVOS04_FLAGS_VPR_TRUE                                      0x00000001
+
+#define NVOS04_FLAGS_CC_SECURE                                     2:2
+#define NVOS04_FLAGS_CC_SECURE_FALSE                               0x00000000
+#define NVOS04_FLAGS_CC_SECURE_TRUE                                0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING                  3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE            0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE             0x00000001
+
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE                       4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT               0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE                   0x00000001
+
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL                           5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE                     0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE                      0x00000001
+
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING                     6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE               0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE                0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE                7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE          0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE           0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE                    10:8
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED                    11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE              0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE               0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE               20:12
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED               21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE         0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE          0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV                 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE           0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE            0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER                        23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE                  0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE                   0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO                      24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE                0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE                 0x00000001
+
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL           25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE     0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE      0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT                  26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE            0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE             0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT                 27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE           0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE            0x00000001
+
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD                          29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT                  0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE                      0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO                      0x00000002
+
+#define NVOS04_FLAGS_MAP_CHANNEL                                   30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE                             0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE                              0x00000001
+
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC                          31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE                    0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE                     0x00000001
+
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD    3U
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+    NvHandle hObjectError; // error context DMA
+    NvHandle hObjectBuffer; // no longer used
+    NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8);    // offset to beginning of GP FIFO
+    NvU32    gpFifoEntries;    // number of GP FIFO entries
+
+    NvU32    flags;
+
+
+    NvHandle hContextShare; // context share handle
+    NvHandle hVASpace; // VASpace for the channel
+
+    // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+    NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+    // offset to beginning of UserD within hUserdMemory[x]
+    NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+    // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
+    NvU32    engineType;
+    // Channel identifier that is unique for the duration of a RM session
+    NvU32    cid;
+    // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+    NvU32    subDeviceId;
+    NvHandle hObjectEccError; // ECC error context DMA
+
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+    NvHandle hPhysChannelGroup;              // reserved
+    NvU32    internalFlags;                 // reserved
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+    NvU32    ProcessID;                 // reserved
+    NvU32    SubProcessID;                 // reserved
+    // IV used for CPU-side encryption / GPU-side decryption.
+    NvU32    encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD];          // reserved
+    // IV used for CPU-side decryption / GPU-side encryption.
+    NvU32    decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD];          // reserved
+    // Nonce used CPU-side signing / GPU-side signature verification.
+    NvU32    hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD];       // reserved
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
+#endif
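
The NVOS04_FLAGS_* definitions above follow RM's usual DRF convention: the bare macro names the hi:lo bit range a field occupies inside the 32-bit flags word (CHANNEL_TYPE is bits 1:0, GROUP_CHANNEL_RUNQUEUE is bit 4, CHANNEL_USERD_INDEX_VALUE is bits 10:8), and the suffixed macros are the values to place in that range. A self-contained sketch of packing such fields, using a local helper rather than the driver's own DRF-style macros:

/* Illustrative only: packs a value into a hi:lo bit range of a flags word.
 * The real driver uses its own field helpers for this. */
#include <stdint.h>
#include <stdio.h>

static uint32_t set_field(uint32_t flags, unsigned hi, unsigned lo, uint32_t val)
{
	uint32_t mask = ((1u << (hi - lo + 1)) - 1) << lo;

	return (flags & ~mask) | ((val << lo) & mask);
}

int main(void)
{
	uint32_t flags = 0;

	flags = set_field(flags, 1, 0, 0x00000000);  /* CHANNEL_TYPE = PHYSICAL        */
	flags = set_field(flags, 4, 4, 0x00000001);  /* GROUP_CHANNEL_RUNQUEUE = ONE   */
	flags = set_field(flags, 10, 8, 2);          /* CHANNEL_USERD_INDEX_VALUE = 2  */
	printf("flags = 0x%08x\n", flags);           /* prints 0x00000210 */
	return 0;
}
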
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
new file mode 100644
index 0000000..7a3fc02
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl0000_h__
+#define __src_common_sdk_nvidia_inc_class_cl0000_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_ROOT        (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+    NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+    NvU32    processID;
+    char     processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
new file mode 100644
index 0000000..e4de36d
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl0005_h__
+#define __src_common_sdk_nvidia_inc_class_cl0005_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV0005_ALLOC_PARAMETERS {
+    NvHandle hParentClient;
+    NvHandle hSrcResource;
+
+    NvV32    hClass;
+    NvV32    notifyIndex;
+    NV_DECLARE_ALIGNED(NvP64 data, 8);
+} NV0005_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
new file mode 100644
index 0000000..8868118
--- /dev/null
@@ -0,0 +1,43 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl0080_h__
+#define __src_common_sdk_nvidia_inc_class_cl0080_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_DEVICE_0      (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0080_ALLOC_PARAMETERS {
+    NvU32    deviceId;
+    NvHandle hClientShare;
+    NvHandle hTargetClient;
+    NvHandle hTargetDevice;
+    NvV32    flags;
+    NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
+    NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
+    NvV32    vaMode;
+} NV0080_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
new file mode 100644
index 0000000..9040ea5
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl2080_h__
+#define __src_common_sdk_nvidia_inc_class_cl2080_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV20_SUBDEVICE_0      (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV2080_ALLOC_PARAMETERS {
+    NvU32 subDeviceId;
+} NV2080_ALLOC_PARAMETERS;
+
+#endif
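
Taken together, cl0000.h, cl0080.h and cl2080.h describe the top of the RM object hierarchy: an NV01_ROOT client owns NV01_DEVICE_0 devices, which in turn own NV20_SUBDEVICE_0 subdevices — the same client/device/object grouping the nvkm_vmm rm struct earlier in this diff keeps per-VMM. A rough sketch of the allocation order; rm_alloc() and every handle value below are made-up stand-ins for the driver's real GSP-RM allocation RPC:

/* Illustrative sketch only: rm_alloc() is a made-up stand-in that just records
 * which class is allocated under which parent; handles are arbitrary. */
#include <stdio.h>

static int rm_alloc(unsigned parent, unsigned handle, unsigned cls)
{
	printf("alloc class 0x%04x: handle 0x%08x under parent 0x%08x\n",
	       cls, handle, parent);
	return 0;
}

int main(void)
{
	unsigned client = 0xc1d00000, device = 0xde000000, subdev = 0x2080;

	/* NV01_ROOT client first, then an NV01_DEVICE_0 under it, then an
	 * NV20_SUBDEVICE_0 under the device - mirroring the three parameter
	 * structs above (NV0000/NV0080/NV2080_ALLOC_PARAMETERS). */
	rm_alloc(0,      client, 0x0000);   /* NV01_ROOT        */
	rm_alloc(client, device, 0x0080);   /* NV01_DEVICE_0    */
	rm_alloc(device, subdev, 0x2080);   /* NV20_SUBDEVICE_0 */
	return 0;
}
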
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
new file mode 100644
index 0000000..ba659d6
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
+#define __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_NOTIFIERS_HOTPLUG                                   (1)
+
+#define NV2080_NOTIFIERS_DP_IRQ                                    (7)
+
+#define NV2080_ENGINE_TYPE_GRAPHICS                   (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0                        NV2080_ENGINE_TYPE_GRAPHICS
+
+#define NV2080_ENGINE_TYPE_COPY0                      (0x00000009)
+
+#define NV2080_ENGINE_TYPE_BSP                        (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0                     NV2080_ENGINE_TYPE_BSP
+
+#define NV2080_ENGINE_TYPE_MSENC                      (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0                      NV2080_ENGINE_TYPE_MSENC  /* Mutually exclusive alias */
+
+#define NV2080_ENGINE_TYPE_SW                         (0x00000022)
+
+#define NV2080_ENGINE_TYPE_SEC2                       (0x00000026)
+
+#define NV2080_ENGINE_TYPE_NVJPG                      (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0                     NV2080_ENGINE_TYPE_NVJPG
+
+#define NV2080_ENGINE_TYPE_OFA                        (0x00000033)
+
+typedef struct {
+    NvU32 plugDisplayMask;
+    NvU32 unplugDisplayMask;
+} Nv2080HotplugNotification;
+
+typedef struct Nv2080DpIrqNotificationRec {
+    NvU32 displayId;
+} Nv2080DpIrqNotification;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
new file mode 100644
index 0000000..9eb780a
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl84a0_h__
+#define __src_common_sdk_nvidia_inc_class_cl84a0_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
+
+#define NV01_MEMORY_LIST_FBMEM  (0x00000082)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
new file mode 100644
index 0000000..f1d2177
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl90f1_h__
+#define __src_common_sdk_nvidia_inc_class_cl90f1_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define FERMI_VASPACE_A                                     (0x000090f1)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
new file mode 100644
index 0000000..b8f3257
--- /dev/null
@@ -0,0 +1,34 @@
+#ifndef __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
+#define __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NVC0B5_ALLOCATION_PARAMETERS {
+    NvU32 version;
+    NvU32 engineType;
+} NVC0B5_ALLOCATION_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
new file mode 100644
index 0000000..58b3ba7
--- /dev/null
@@ -0,0 +1,39 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+    NvBool bDscSupported;
+    NvU32  encoderColorFormatMask;
+    NvU32  lineBufferSizeKB;
+    NvU32  rateBufferSizeKB;
+    NvU32  bitsPerPixelPrecision;
+    NvU32  maxNumHztSlices;
+    NvU32  lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
new file mode 100644
index 0000000..596f2ea
--- /dev/null
@@ -0,0 +1,166 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 flags2;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL                                       2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS                       (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS                       (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI                        (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT                (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI                        (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK                       (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE                                         5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE                         (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE                       (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL                         (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD                         (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT                          (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT                                        6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE                     (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR                     (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER                                   7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL                 (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE                (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE                                 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE                (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE                 (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE                        9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE       (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE        (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE                         10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE          (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE           (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE                    11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE     (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE      (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE                    12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE     (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE      (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED                               14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE                (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE                 (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT                       15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE        (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE         (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT                         16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE           (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR     (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW                                 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS               (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS               (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS               (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS               (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK                                       21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE                         (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE                       (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL                         (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID                           22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE            (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE             (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID                              24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE            (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A               (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B               (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED          (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED                   25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE    (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE     (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT                      29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE                        30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE         (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE          (0x00000001U)
+
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS                         (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER                      96U
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numELDSize;
+    NvU8  bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+    NvU32 maxFreqSupported;
+    NvU32 ctrl;
+    NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD                                     0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE              (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE               (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV                                   1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE            (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE             (0x00000001U)
+
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE                (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+    NvU32 displayMask;
+    NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR           (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS  4U
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+    NvU32                                 subDeviceInstance;
+    NvU32                                 displayId;
+    NvU8                                  sorExcludeMask;
+    NvU32                                 slaveDisplayId;
+    NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+    NvBool                                bIs2Head1Or;
+    NvU32                                 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NV0073_CTRL_DFP_ASSIGN_SOR_INFO       sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NvU8                                  reservedSorMask;
+    NvU32                                 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO                                      0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL                    (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT                    (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE               1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO  (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
new file mode 100644
index 0000000..bae4b19
--- /dev/null
@@ -0,0 +1,335 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL      (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bAddrOnly;
+    NvU32  cmd;
+    NvU32  addr;
+    NvU8   data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+    NvU32  size;
+    NvU32  replyType;
+    NvU32  retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE                          3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C               (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX               (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT                       2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE          (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE           (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE                      1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE         (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ          (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS  (0x00000002U)
+
+#define NV0073_CTRL_CMD_DP_CTRL                     (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 cmd;
+    NvU32 data;
+    NvU32 err;
+    NvU32 retryTimeMs;
+    NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT                           0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE                         (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE                          (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW                              1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE                            (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE                             (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD                       2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE                     (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE                      (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED                                   3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE                          4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM                (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM                 (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING                       5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO                        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES                       (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING                         6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO                          (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES                         (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING                     7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE                   (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE                    (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING                   8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT               (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE                 (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING                      9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO                       (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES                      (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED                10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO                   (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES                  (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING                     12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO                        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON    (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER                     13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO                        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES                       (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG                        14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE                        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE                         (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC                             15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE                             (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE                              (0x00000001U)
+
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST                         29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO                            (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES                           (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE              30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE              (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE               (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG                    31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE                    (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE                     (0x00000001U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT                          4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0                            (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1                            (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2                            (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4                            (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8                            (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW                            15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS                        (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS                        (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS                        (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS                        (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS                        (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS                        (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS                        (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS                        (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING                  18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO                     (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES                    (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET                                22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK                                 (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0                       (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1                       (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2                       (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3                       (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4                       (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5                       (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6                       (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7                       (0x00000008U)
+
+#define NV0073_CTRL_MAX_LANES                                           8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numLanes;
+    NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS                   1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE    (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1  (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2  (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3  (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT                  3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM      (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID  (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  preferredDisplayId;
+
+    NvBool force;
+    NvBool useBFM;
+
+    NvU32  displayIdAssigned;
+    NvU32  allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM                   (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvU32  sorIndex;
+    NvU32  dpLink;
+
+    NvBool bEnableOverride;
+    NvBool bMST;
+    NvU32  singleHeadMultistreamMode;
+    NvU32  hBlankSym;
+    NvU32  vBlankSym;
+    NvU32  colorFormat;
+    NvBool bEnableTwoHeadOneOr;
+
+    struct {
+        NvU32  slotStart;
+        NvU32  slotEnd;
+        NvU32  PBN;
+        NvU32  Timeslice;
+        NvBool sendACT;          // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT
+        NvU32  singleHeadMSTPipeline;
+        NvBool bEnableAudioOverRightPanel;
+    } MST;
+
+    struct {
+        NvBool bEnhancedFraming;
+        NvU32  tuSize;
+        NvU32  waterMark;
+        NvU32  actualPclkHz;     // deprecated  -Use MvidWarParams
+        NvU32  linkClkFreqHz;    // deprecated  -Use MvidWarParams
+        NvBool bEnableAudioOverRightPanel;
+        struct {
+            NvU32  activeCnt;
+            NvU32  activeFrac;
+            NvU32  activePolarity;
+            NvBool mvidWarEnabled;
+            struct {
+                NvU32 actualPclkHz;
+                NvU32 linkClkFreqHz;
+            } MvidWarParams;
+        } Legacy;
+    } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT                    (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+    NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS   (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+    NvU32                          subDeviceInstance;
+    NvU32                          sorIndex;
+    NvU32                          maxLinkRate;
+    NvU32                          dpVersionsSupported;
+    NvU32                          UHBRSupported;
+    NvBool                         bIsMultistreamSupported;
+    NvBool                         bIsSCEnabled;
+    NvBool                         bHasIncreasedWatermarkLimits;
+    NvBool                         bIsPC2Disabled;
+    NvBool                         isSingleHeadMSTSupported;
+    NvBool                         bFECSupported;
+    NvBool                         bIsTrainPhyRepeater;
+    NvBool                         bOverrideLinkBw;
+    NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2                0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO              (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES             (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4                1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO              (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES             (0x00000001U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE                           2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE                          (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62                          (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70                          (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40                          (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10                          (0x00000004U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB                (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444        (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16           (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8            (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4            (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2            (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1              (0x00000005U)
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES        8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+    // In
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+    // Out
+    NvU8  linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+    NvU8  linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE                                   3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN     (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK     (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN      (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE  (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK      (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR            (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO         (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO          (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK             (0x00000009U)
+
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK                          (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING                     (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR                  (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR                 (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR                (0x80000004U)
+
+#endif
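
A note on the bare "HI:LO" defines above (e.g. ..._MAX_LINK_RATE 2:0): this is RM's bit-range notation, intended to be expanded inside DRF-style field macros that split the range with a ternary trick. Below is a minimal, self-contained sketch of that trick; the FIELD_* helper names are invented for illustration and are not part of the header or of this series.

#include <stdio.h>

/* Copied from the header above so this sketch builds standalone. */
#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE       2:0
#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10  (0x00000004U)

/* Illustrative stand-ins for the usual DRF helpers: "1 ? 2:0" parses as the
 * ternary "1 ? 2 : 0" and yields the high bit, "0 ? 2:0" yields the low bit. */
#define FIELD_HI(range)      (1 ? range)
#define FIELD_LO(range)      (0 ? range)
#define FIELD_MASK(range)    ((0xFFFFFFFFu >> (31 - FIELD_HI(range))) & \
                              (0xFFFFFFFFu << FIELD_LO(range)))
#define FIELD_GET(range, v)  (((v) & FIELD_MASK(range)) >> FIELD_LO(range))

int main(void)
{
    unsigned int caps = 0x00000014; /* hypothetical word returned by GET_CAPS */
    unsigned int rate = FIELD_GET(NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE, caps);

    if (rate == NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10)
        printf("sink supports 8.1 Gbps per lane (HBR3)\n");
    return 0;
}
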
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
new file mode 100644 (file)
index 0000000..954958d
--- /dev/null
@@ -0,0 +1,216 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2         (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 bufferSize;
+    NvU32 flags;
+    NvU8  edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA   (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_MAX_CONNECTORS                    4U
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 DDCPartners;
+    NvU32 count;
+    struct {
+        NvU32 index;
+        NvU32 type;
+        NvU32 location;
+    } data[NV0073_CTRL_MAX_CONNECTORS];
+    NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+    NvU8  subDeviceInstance;
+    NvU32 displayId;
+    NvU8  enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+    NvU8  subDeviceInstance;
+    NvU32 displayId;
+    NvU8  mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE     36U
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  transmitControl;
+    NvU32  packetSize;
+    NvU32  targetHead;
+    NvBool bUsePsrHeadforSdp;
+    NvU8   aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE                                     0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO                      (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES                     (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME                                1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE            (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE             (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME                               2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE           (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE            (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK                                 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE              (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE               (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE                                  4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE              (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE               (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT                                  5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED        (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED        (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY                        6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE      (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE       (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING                   7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE  (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE                         9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0  (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1  (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE                     31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO        (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES       (0x0000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  index;
+    NvU32  type;
+    NvU32  protocol;
+    NvU32  ditherType;
+    NvU32  ditherAlgo;
+    NvU32  location;
+    NvU32  rootPortId;
+    NvU32  dcbIndex;
+    NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+    NvBool bIsLitByVbios;
+    NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE                   (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC                    (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR                    (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR                   (0x00000003U)
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI                    (0x00000005U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT        (0x00000000U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM    (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A  (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B  (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS      (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A           (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B           (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI            (0x00000010U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI                (0x00000011U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC  (0x00000000U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN            (0xFFFFFFFFU)
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  brightness;
+    NvBool bUncalibrated;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED                           0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE       (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE        (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED                     1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE  (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED                                      2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE                  (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE                   (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED                              5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE           (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G      (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G      (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G      (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G      (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G     (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G     (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED                                    6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE                (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE                 (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED                          9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE       (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G  (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G  (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G  (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G  (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+#endif
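
As a hedged illustration of how such a control parameter block is consumed, the sketch below fills the GET_EDID_V2 params and hands them to a made-up rm_disp_control() helper; only the structure layout and defines come from ctrl0073specific.h, and the meaning of bufferSize on return is an assumption from the field name. It assumes the header above (and the Nv* typedefs it relies on) is in scope.

#include <string.h>

int rm_disp_control(void *rm, unsigned int cmd, void *params, size_t size); /* hypothetical */

static int read_edid_via_rm(void *rm, NvU32 display_id, NvU8 *out, NvU32 out_len)
{
    NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS params = {0};
    int ret;

    params.subDeviceInstance = 0;
    params.displayId  = display_id;   /* single-bit mask naming the output */
    params.bufferSize = NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES;

    ret = rm_disp_control(rm, NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2,
                          &params, sizeof(params));
    if (ret)
        return ret;

    /* Assumed: bufferSize holds the number of valid EDID bytes on return. */
    if (params.bufferSize > out_len)
        params.bufferSize = out_len;
    memcpy(out, params.edidBuffer, params.bufferSize);
    return (int)params.bufferSize;
}

Note the params block is over 2 KiB because of the fixed edidBuffer; a real driver would allocate it rather than keep it on the stack as this sketch does.
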
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
new file mode 100644 (file)
index 0000000..d69cef3
--- /dev/null
@@ -0,0 +1,65 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayMask;
+    NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 displayMask;
+    NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE                (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 flags;
+    NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS             (16U)
+
+#endif
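
The system-interface controls follow the same fill-and-call pattern; the simplest is asking RM how many display heads the subdevice exposes. Another sketch, reusing the same hypothetical rm_disp_control() helper as above:

static int get_num_heads(void *rm, NvU32 *nr_heads)
{
    NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS params = {0};
    int ret;

    ret = rm_disp_control(rm, NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS,
                          &params, sizeof(params));
    if (ret)
        return ret;

    *nr_heads = params.numHeads;   /* filled in by GSP-RM */
    return 0;
}
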
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
new file mode 100644 (file)
index 0000000..6acb3f7
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID                          4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS                 (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD                      (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO                    (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG                     (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE                  (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY                  (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION               (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS              (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL           (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM              (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT          (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT         (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL           (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL        (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB          (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV             (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH           (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB       (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB    (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL   (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL       (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK   (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT      (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT                    (0x00000019)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
new file mode 100644 (file)
index 0000000..3db099e
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+    NvU32  totalVFs;
+    NvU32  firstVfOffset;
+    NvU32  vfFeatureMask;
+    NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+    NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+    NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+    NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+    NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+    NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+    NvBool b64bitBar0;
+    NvBool b64bitBar1;
+    NvBool b64bitBar2;
+    NvBool bSriovEnabled;
+    NvBool bSriovHeavyEnabled;
+    NvBool bEmulateVFBar0TlbInvalidationRegister;
+    NvBool bClientRmAllocatedCtxBuffer;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
new file mode 100644 (file)
index 0000000..ed01df9
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE            23
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
new file mode 100644 (file)
index 0000000..b5b7631
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+    NvU32 BoardID;
+    char  chipSKU[4];
+    char  chipSKUMod[2];
+    char  project[5];
+    char  projectSKU[5];
+    char  CDP[6];
+    char  projectSKUMod[2];
+    NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
new file mode 100644 (file)
index 0000000..fe912d2
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+    NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
new file mode 100644 (file)
index 0000000..87bc4ff
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+    NvU32  event;
+    NvU32  action;
+    NvBool bNotifyState;
+    NvU32  info32;
+    NvU16  info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT  (0x00000002)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
new file mode 100644 (file)
index 0000000..68c81f9
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES   17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 limit, 8);
+    NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+    NvU32                                                  performance;
+    NvBool                                                 supportCompressed;
+    NvBool                                                 supportISO;
+    NvBool                                                 bProtected;
+    NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+    NvU32 numFBRegions;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#endif
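
A sketch of how the FB region table might be walked once GSP-RM has filled it in. The inclusive meaning of 'limit' and the accounting of 'reserved' are assumptions from the field names, not taken from the header; the caller that performs the control call is left out.

static NvU64 usable_fb_bytes(const NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *p)
{
    NvU64 total = 0;
    NvU32 i;

    for (i = 0; i < p->numFBRegions &&
                i < NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES; i++) {
        const NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *r = &p->fbRegion[i];

        /* Treat 'limit' as the last byte of the region and leave out the
         * bytes RM reports as reserved. */
        total += (r->limit - r->base + 1) - r->reserved;
    }
    return total;
}
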
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
new file mode 100644 (file)
index 0000000..bc0f636
--- /dev/null
@@ -0,0 +1,52 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE                 (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES         32
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES   16
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA    2
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+    NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+    NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+    NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+    NvU32 numPbdmas;
+    char  engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+    NvU32                         baseIndex;
+    NvU32                         numEntries;
+    NvBool                        bMore;
+    // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+    NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
+
+#endif
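
The device info table is larger than a single reply, so 'baseIndex' and 'bMore' presumably page through it. A sketch of that loop follows; rm_subdev_control() is a hypothetical stand-in for the subdevice control helper, and the paging convention is inferred from the field names rather than documented here. Kernel printk helpers are assumed to be available.

static int print_fifo_engines(void *rm)
{
    NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS params = {0};
    NvU32 i;
    int ret;

    do {
        ret = rm_subdev_control(rm, NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE,
                                &params, sizeof(params));   /* hypothetical */
        if (ret)
            return ret;

        for (i = 0; i < params.numEntries; i++)
            pr_info("fifo engine: %.16s\n", params.entries[i].engineName);

        params.baseIndex += NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES;
    } while (params.bMore);

    return 0;
}
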
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
new file mode 100644 (file)
index 0000000..29d7a10
--- /dev/null
@@ -0,0 +1,100 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH                  (0x0000040U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0            (0x00000000U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3            (0x00000003U)
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
+    NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32 physAttr;
+    NvU16 bufferId;
+    NvU8  bInitialize;
+    NvU8  bNonmapped;
+} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
+
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN                         0U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM                           1U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH                        2U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB             3U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL                     4U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB                 5U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL                6U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL                    7U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK                8U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT                   9U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP              10U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP       12U
+
+#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES                        16U
+
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX                                    (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+    NvU32    engineType;
+    NvHandle hClient;
+    NvU32    ChID;
+    NvHandle hChanClient;
+    NvHandle hObject;
+    NvHandle hVirtMemory;
+    NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32    entryCount;
+    // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+    NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+#define NV2080_GPU_MAX_GID_LENGTH             (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+    NvU32 index;
+    NvU32 flags;
+    NvU32 length;
+    NvU8  data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+#endif
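
For the PROMOTE_CTX control, a minimal sketch of filling one promote entry is shown below. The meanings of bInitialize and bNonmapped are assumptions from the field names, and the client/channel handle fields (hClient, hChanClient, hObject, ChID, engineType) that a real caller must also set are omitted; only the structure layout and buffer-ID defines come from the header above.

#include <string.h>

static void promote_main_buffer(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *p,
                                NvU64 gpu_va, NvU64 size)
{
    NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *e;

    memset(p, 0, sizeof(*p));
    e = &p->promoteEntry[0];

    e->bufferId    = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN;
    e->gpuVirtAddr = gpu_va;
    e->size        = size;
    e->bInitialize = 1;   /* assumed: ask RM to initialize the buffer */
    e->bNonmapped  = 0;   /* assumed: a GPU VA is supplied, so it is mapped */
    p->entryCount  = 1;
}
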
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
new file mode 100644 (file)
index 0000000..59f8895
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
+} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
new file mode 100644 (file)
index 0000000..e11b2db
--- /dev/null
@@ -0,0 +1,162 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+    NvU32  feHwSysCap;
+    NvU32  windowPresentMask;
+    NvBool bFbRemapperEnabled;
+    NvU32  numHeads;
+    NvBool bPrimaryVga;
+    NvU32  i2cPort;
+    NvU32  internalDispActiveMask;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES         8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+    NvU32 size;
+    NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+    NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+    NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
+    NvU32 engDesc;
+    NvU32 ctxAttr;
+    NvU32 ctxBufferSize;
+    NvU32 addrSpaceList;
+    NvU32 registerBase;
+} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
+#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS     0x40
+
+#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+    NvU32                                        numConstructedFalcons;
+    NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
+    NvU32 instMemAddrSpace;
+    NvU32 instMemCpuCacheAttr;
+} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+    NvU32  addressSpace;
+    NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 limit, 8);
+    NvU32  cacheSnoop;
+    NvU32  hclass;
+    NvU32  channelInstance;
+    NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE       128
+
+typedef enum NV2080_INTR_CATEGORY {
+    NV2080_INTR_CATEGORY_DEFAULT = 0,
+    NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
+    NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
+    NV2080_INTR_CATEGORY_RUNLIST = 3,
+    NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
+    NV2080_INTR_CATEGORY_UVM_OWNED = 5,
+    NV2080_INTR_CATEGORY_UVM_SHARED = 6,
+    NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
+} NV2080_INTR_CATEGORY;
+
+typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
+    NvU8 subtreeStart;
+    NvU8 subtreeEnd;
+} NV2080_INTR_CATEGORY_SUBTREE_MAP;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+    NvU16 engineIdx;
+    NvU32 pmcIntrMask;
+    NvU32 vectorStall;
+    NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+    NvU32                                            tableLen;
+    NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+    NV2080_INTR_CATEGORY_SUBTREE_MAP                 subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+    NvU32    fbsrType;
+    NvU32    numRegions;
+    NvHandle hClient;
+    NvHandle hSysMem;
+    NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
+    NvBool   bEnteringGcoffState;
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
+    NvU32    fbsrType;
+    NvHandle hClient;
+    NvHandle hVidMem;
+    NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_ACPI_DSM_READ_SIZE                   (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
+    NvU32 status;
+    NvU16 backLightDataSize;
+    NvU8  backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
+} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
new file mode 100644 (file)
index 0000000..977e598
--- /dev/null
@@ -0,0 +1,95 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define GMMU_FMT_MAX_LEVELS  6U
+
+#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
+
+typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
+    /*!
+     * [in] GPU sub-device handle - this API only supports unicast.
+     *      Pass 0 to use subDeviceId instead.
+     */
+    NvHandle hSubDevice;
+
+    /*!
+     * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+     */
+    NvU32    subDeviceId;
+
+    /*!
+     * [in] Page size (VA coverage) of the level to reserve.
+     *      This need not be a leaf (page table) page size - it can be
+     *      the coverage of an arbitrary level (including root page directory).
+     */
+    NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+    /*!
+     * [in] First GPU virtual address of the range to reserve.
+     *      This must be aligned to pageSize.
+     */
+    NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+    /*!
+     * [in] Last GPU virtual address of the range to reserve.
+     *      This (+1) must be aligned to pageSize.
+     */
+    NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+
+    /*! 
+     * [in] Number of PDE levels to copy.
+     */
+    NvU32    numLevelsToCopy;
+
+    /*!
+     * [in] Per-level information.
+     */
+    struct {
+        /*!
+         * Physical address of this page level instance.
+         */
+        NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+
+        /*!
+         * Size in bytes allocated for this level instance.
+         */
+        NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+        /*!
+         * Aperture in which this page level instance resides.
+         */
+        NvU32 aperture;
+
+        /*!
+         * Page shift corresponding to the level
+         */
+        NvU8  pageShift;
+    } levels[GMMU_FMT_MAX_LEVELS];
+} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
+
+#endif
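
The virtAddrLo/virtAddrHi pair above describes an inclusive range whose ends must line up with pageSize. A minimal standalone sketch of that alignment rule, with stdint stand-in types and made-up example values:

#include <assert.h>
#include <stdint.h>

/* virtAddrLo must be pageSize-aligned; virtAddrHi is inclusive, so
 * virtAddrHi + 1 must be pageSize-aligned as well. */
static int vaspace_range_aligned(uint64_t page_size, uint64_t lo, uint64_t hi)
{
    return (lo % page_size) == 0 && ((hi + 1) % page_size) == 0;
}

int main(void)
{
    /* Hypothetical 512 MiB reservation described at 2 MiB granularity. */
    assert(vaspace_range_aligned(2ULL << 20, 0x100000000ULL, 0x11fffffffULL));
    return 0;
}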
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
new file mode 100644 (file)
index 0000000..6840457
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+
+typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
+    NvBool bEnable;
+    NvBool bSkipSubmit;
+} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
+
+#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
+
+typedef struct NVA06F_CTRL_BIND_PARAMS {
+    NvU32 engineType;
+} NVA06F_CTRL_BIND_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
new file mode 100644 (file)
index 0000000..5c5a004
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_common_sdk_nvidia_inc_nvlimits_h__
+#define __src_common_sdk_nvidia_inc_nvlimits_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV_MAX_SUBDEVICES       8
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
new file mode 100644 (file)
index 0000000..51b5591
--- /dev/null
@@ -0,0 +1,148 @@
+#ifndef __src_common_sdk_nvidia_inc_nvos_h__
+#define __src_common_sdk_nvidia_inc_nvos_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NVOS02_FLAGS_PHYSICALITY                                   7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS                        (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS                     (0x00000001)
+#define NVOS02_FLAGS_LOCATION                                      11:8
+#define NVOS02_FLAGS_LOCATION_PCI                                  (0x00000000)
+#define NVOS02_FLAGS_LOCATION_AGP                                  (0x00000001)
+#define NVOS02_FLAGS_LOCATION_VIDMEM                               (0x00000002)
+#define NVOS02_FLAGS_COHERENCY                                     15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED                            (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED                              (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE                       (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH                       (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT                       (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK                          (0x00000005)
+#define NVOS02_FLAGS_ALLOC                                         17:16
+#define NVOS02_FLAGS_ALLOC_NONE                                    (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE                                 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO                              (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES                             (0x00000001)
+
+#define NVOS02_FLAGS_KERNEL_MAPPING                                19:19
+#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP                         (0x00000000)
+#define NVOS02_FLAGS_KERNEL_MAPPING_MAP                            (0x00000001)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY                            20:20
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO                         (0x00000000)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES                        (0x00000001)
+
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY                          21:21
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO                       (0x00000000)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES                      (0x00000001)
+
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY                        22:22
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO                     (0x00000000)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES                    (0x00000001)
+
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE                             23:23
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT                     (0x00000000)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED                    (0x00000001)
+
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT                          24:24
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE                 (0x00000001)
+
+#define NVOS02_FLAGS_MEMORY_PROTECTION                             26:25
+#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT                     (0x00000000)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED                   (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED                 (0x00000002)
+
+#define NVOS02_FLAGS_MAPPING                                       31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT                               (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP                                (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP                             (0x00000002)
+
+#define NV01_EVENT_CLIENT_RM                                       (0x04000000)
+
+typedef struct
+{
+    NvV32    channelInstance;            // One of the n channel instances of a given channel type.
+                                         // Note that core channel has only one instance
+                                         // while all others have two (one per head).
+    NvHandle hObjectBuffer;              // ctx dma handle for DMA push buffer
+    NvHandle hObjectNotify;              // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+    NvU32    offset;                     // Initial offset for put/get, usually zero.
+    NvP64    pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+    NvU32    flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB                1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES            0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO             0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvV32    channelInstance;            // One of the n channel instances of a given channel type.
+                                         // All PIO channels have two instances (one per head).
+    NvHandle hObjectNotify;              // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+    NvP64    pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;
+    NvU32 engineInstance;               // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of MSENC?
+    NvU32 engineInstance;             // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of NVJPG?
+    NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of OFA?
+} NV_OFA_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32   index;
+    NvV32   flags;
+    NvU64   vaSize NV_ALIGN_BYTES(8);
+    NvU64   vaStartInternal NV_ALIGN_BYTES(8);
+    NvU64   vaLimitInternal NV_ALIGN_BYTES(8);
+    NvU32   bigPageSize;
+    NvU64   vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW                                 0x00 //<! Create new VASpace, by default
+
+#endif
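
The NVOS02_FLAGS_* names above follow the RM convention of defining a field as a HI:LO bit range plus a set of values for it; the range itself is only usable through DRF-style macros that split it with the ternary operator. A self-contained sketch of that mechanism follows. The DRF_* helpers are illustrative stand-ins rather than the macros nouveau actually uses, and a few field definitions are repeated locally so the snippet builds on its own.

#include <stdint.h>
#include <stdio.h>

/* Field ranges and values repeated from the header above for a standalone build. */
#define NVOS02_FLAGS_PHYSICALITY                7:4
#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS  (0x00000001)
#define NVOS02_FLAGS_LOCATION                   11:8
#define NVOS02_FLAGS_LOCATION_PCI               (0x00000000)
#define NVOS02_FLAGS_COHERENCY                  15:12
#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE    (0x00000002)

/* Illustrative DRF-style helpers: "1 ? 7:4" evaluates to 7, "0 ? 7:4" to 4. */
#define DRF_HI(range)       (1 ? range)
#define DRF_LO(range)       (0 ? range)
#define DRF_MASK(range)     (0xffffffffU >> (31 - DRF_HI(range) + DRF_LO(range)))
#define DRF_NUM(range, v)   (((uint32_t)(v) & DRF_MASK(range)) << DRF_LO(range))

int main(void)
{
    uint32_t flags = DRF_NUM(NVOS02_FLAGS_PHYSICALITY, NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS) |
                     DRF_NUM(NVOS02_FLAGS_LOCATION,    NVOS02_FLAGS_LOCATION_PCI) |
                     DRF_NUM(NVOS02_FLAGS_COHERENCY,   NVOS02_FLAGS_COHERENCY_WRITE_COMBINE);

    printf("flags = 0x%08x\n", (unsigned)flags); /* prints 0x00002010 */
    return 0;
}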
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
new file mode 100644 (file)
index 0000000..5a2f273
--- /dev/null
@@ -0,0 +1,46 @@
+#ifndef __src_common_shared_msgq_inc_msgq_msgq_priv_h__
+#define __src_common_shared_msgq_inc_msgq_msgq_priv_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct
+{
+    NvU32 version;   // queue version
+    NvU32 size;      // bytes, page aligned
+    NvU32 msgSize;   // entry size, bytes, must be power-of-2, 16 is minimum
+    NvU32 msgCount;  // number of entries in queue
+    NvU32 writePtr;  // message id of next slot
+    NvU32 flags;     // if set it means "i want to swap RX"
+    NvU32 rxHdrOff;  // Offset of msgqRxHeader from start of backing store.
+    NvU32 entryOff;  // Offset of entries from start of backing store.
+} msgqTxHeader;
+
+typedef struct
+{
+    NvU32 readPtr; // message id of last message read
+} msgqRxHeader;
+
+#endif
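
writePtr and readPtr index a ring of msgCount fixed-size entries; a common convention, assumed here, is to leave one slot unused so that the full and empty states stay distinguishable. A standalone sketch of that occupancy arithmetic (an illustration, not nouveau's queue code):

#include <stdint.h>

/* Entries currently used, as seen from a tx/rx header pair. */
static uint32_t msgq_used(uint32_t write_ptr, uint32_t read_ptr, uint32_t msg_count)
{
    return (write_ptr + msg_count - read_ptr) % msg_count;
}

/* Entries still free for the writer, keeping one slot unused. */
static uint32_t msgq_free(uint32_t write_ptr, uint32_t read_ptr, uint32_t msg_count)
{
    return msg_count - 1 - msgq_used(write_ptr, read_ptr, msg_count);
}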
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
new file mode 100644 (file)
index 0000000..83cf1b2
--- /dev/null
@@ -0,0 +1,52 @@
+#ifndef __src_common_uproc_os_common_include_libos_init_args_h__
+#define __src_common_uproc_os_common_include_libos_init_args_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef NvU64 LibosAddress;
+
+typedef enum {
+    LIBOS_MEMORY_REGION_NONE,
+    LIBOS_MEMORY_REGION_CONTIGUOUS,
+    LIBOS_MEMORY_REGION_RADIX3
+} LibosMemoryRegionKind;
+
+typedef enum {
+    LIBOS_MEMORY_REGION_LOC_NONE,
+    LIBOS_MEMORY_REGION_LOC_SYSMEM,
+    LIBOS_MEMORY_REGION_LOC_FB
+} LibosMemoryRegionLoc;
+
+typedef struct
+{
+    LibosAddress          id8;  // Id tag.
+    LibosAddress          pa;   // Physical address.
+    LibosAddress          size; // Size of memory area.
+    NvU8                  kind; // See LibosMemoryRegionKind above.
+    NvU8                  loc;  // See LibosMemoryRegionLoc above.
+} LibosMemoryRegionInitArgument;
+
+#endif
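
id8 is an eight-byte region tag. One plausible way to fill it, shown purely as an assumption, is to pack a short ASCII name into the integer:

#include <stdint.h>

/* Pack up to eight ASCII characters into a 64-bit id, most significant first.
 * Hypothetical helper and tag name, for illustration only. */
static uint64_t libos_id8(const char *name)
{
    uint64_t id = 0;

    for (int i = 0; i < 8 && name[i]; i++)
        id = (id << 8) | (uint8_t)name[i];
    return id;
}

/* Example: libos_id8("LOGINIT") could tag a log-buffer region handed to GSP-RM. */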
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
new file mode 100644 (file)
index 0000000..73213bd
--- /dev/null
@@ -0,0 +1,79 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
+#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define GSP_FW_SR_META_MAGIC     0x8a3bb9e6c6c39d93ULL
+#define GSP_FW_SR_META_REVISION  2
+
+typedef struct
+{
+    //
+    // Magic
+    // Use for verification by Booter
+    //
+    NvU64 magic;  // = GSP_FW_SR_META_MAGIC;
+
+    //
+    // Revision number
+    // Bumped up when we change this interface so it is not backward compatible.
+    // Bumped up when we revoke GSP-RM ucode
+    //
+    NvU64 revision;  // = GSP_FW_SR_META_REVISION;
+
+    //
+    // ---- Members regarding data in SYSMEM ----------------------------
+    // Consumed by Booter for DMA
+    //
+    NvU64 sysmemAddrOfSuspendResumeData;
+    NvU64 sizeOfSuspendResumeData;
+
+    // ---- Members for crypto ops across S/R ---------------------------
+
+    //
+    // HMAC over the entire GspFwSRMeta structure (including padding)
+    // with the hmac field itself zeroed.
+    //
+    NvU8 hmac[32];
+
+    // Hash over GspFwWprMeta structure
+    NvU8 wprMetaHash[32];
+
+    // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
+    NvU8 heapFreeListHash[32];
+
+    // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
+    NvU8 dataHash[32];
+
+    //
+    // Pad structure to exactly 256 bytes (1 DMA chunk).
+    // Padding initialized to zero.
+    //
+    NvU32 padding[24];
+
+} GspFwSRMeta;
+
+#endif
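
The padding comment pins GspFwSRMeta to exactly 256 bytes, one DMA chunk. A standalone compile-time check of that arithmetic, with the NVIDIA fixed-width types replaced by stdint stand-ins:

#include <stdint.h>

typedef struct {
    uint64_t magic, revision;
    uint64_t sysmemAddrOfSuspendResumeData, sizeOfSuspendResumeData;
    uint8_t  hmac[32], wprMetaHash[32], heapFreeListHash[32], dataHash[32];
    uint32_t padding[24];
} GspFwSRMeta_layout;

/* 4 * 8 + 4 * 32 + 24 * 4 = 32 + 128 + 96 = 256 bytes. */
_Static_assert(sizeof(GspFwSRMeta_layout) == 256,
               "GspFwSRMeta must be one 256-byte DMA chunk");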
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
new file mode 100644 (file)
index 0000000..a2e141e
--- /dev/null
@@ -0,0 +1,170 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
+#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct
+{
+    // Magic
+    // BL to use for verification (i.e. Booter locked it in WPR2)
+    NvU64 magic; // = 0xdc3aae21371a60b3;
+
+    // Revision number of Booter-BL-Sequencer handoff interface
+    // Bumped up when we change this interface so it is not backward compatible.
+    // Bumped up when we revoke GSP-RM ucode
+    NvU64 revision; // = 1;
+
+    // ---- Members regarding data in SYSMEM ----------------------------
+    // Consumed by Booter for DMA
+
+    NvU64 sysmemAddrOfRadix3Elf;
+    NvU64 sizeOfRadix3Elf;
+
+    NvU64 sysmemAddrOfBootloader;
+    NvU64 sizeOfBootloader;
+
+    // Offsets inside bootloader image needed by Booter
+    NvU64 bootloaderCodeOffset;
+    NvU64 bootloaderDataOffset;
+    NvU64 bootloaderManifestOffset;
+
+    union
+    {
+        // Used only at initial boot
+        struct
+        {
+            NvU64 sysmemAddrOfSignature;
+            NvU64 sizeOfSignature;
+        };
+
+        //
+        // Used at suspend/resume to read GspFwHeapFreeList
+        // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+        //
+        struct
+        {
+            NvU32 gspFwHeapFreeListWprOffset;
+            NvU32 unused0;
+            NvU64 unused1;
+        };
+    };
+
+    // ---- Members describing FB layout --------------------------------
+    NvU64 gspFwRsvdStart;
+
+    NvU64 nonWprHeapOffset;
+    NvU64 nonWprHeapSize;
+
+    NvU64 gspFwWprStart;
+
+    // GSP-RM to use to setup heap.
+    NvU64 gspFwHeapOffset;
+    NvU64 gspFwHeapSize;
+
+    // BL to use to find ELF for jump
+    NvU64 gspFwOffset;
+    // Size is sizeOfRadix3Elf above.
+
+    NvU64 bootBinOffset;
+    // Size is sizeOfBootloader above.
+
+    NvU64 frtsOffset;
+    NvU64 frtsSize;
+
+    NvU64 gspFwWprEnd;
+
+    // GSP-RM to use for fbRegionInfo?
+    NvU64 fbSize;
+
+    // ---- Other members -----------------------------------------------
+
+    // GSP-RM to use for fbRegionInfo?
+    NvU64 vgaWorkspaceOffset;
+    NvU64 vgaWorkspaceSize;
+
+    // Boot count.  Used to determine whether to load the firmware image.
+    NvU64 bootCount;
+
+    // TODO: the partitionRpc* fields below do not really belong in this
+    //       structure. The values are patched in by the partition bootstrapper
+    //       when GSP-RM is booted in a partition, and this structure was a
+    //       convenient place for the bootstrapper to access them. These should
+    //       be moved to a different comm. mechanism between the bootstrapper
+    //       and the GSP-RM tasks.
+
+    union
+    {
+       struct
+       {
+           // Shared partition RPC memory (physical address)
+           NvU64 partitionRpcAddr;
+
+           // Offsets relative to partitionRpcAddr
+           NvU16 partitionRpcRequestOffset;
+           NvU16 partitionRpcReplyOffset;
+
+           // Code section and dataSection offset and size.
+           NvU32 elfCodeOffset;
+           NvU32 elfDataOffset;
+           NvU32 elfCodeSize;
+           NvU32 elfDataSize;
+
+           // Used during GSP-RM resume to check for revocation
+           NvU32 lsUcodeVersion;
+       };
+
+        struct
+        {
+            // Pad for the partitionRpc* fields, plus 4 bytes
+            NvU32 partitionRpcPadding[4];
+
+            // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+            // elf(Code|Data)(Offset|Size) fields above.
+            // TODO: move to GSP_FMC_INIT_PARAMS
+            NvU64 sysmemAddrOfCrashReportQueue;
+            NvU32 sizeOfCrashReportQueue;
+
+            // Pad for the lsUcodeVersion field
+            NvU32 lsUcodeVersionPadding[1];
+        };
+    };
+
+    // Number of VF partitions allocating sub-heaps from the WPR heap
+    // Used during boot to ensure the heap is adequately sized
+    NvU8 gspFwHeapVfPartitionCount;
+
+    // Pad structure to exactly 256 bytes.  Can replace padding with additional
+    // fields without incrementing revision.  Padding initialized to 0.
+    NvU8 padding[7];
+
+    // BL to use for verification (i.e. Booter says OK to boot)
+    NvU64 verified;  // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_REVISION  1
+#define GSP_FW_WPR_META_MAGIC     0xdc3aae21371a60b3ULL
+
+#endif
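
Per the comments, the producer fills magic and revision, and Booter writes verified once the region is locked in WPR2. A minimal standalone sketch of the check those comments imply, with the fields passed as plain integers so the snippet does not depend on the header:

#include <stdint.h>

/* Constants repeated from GSP_FW_WPR_META_MAGIC / _REVISION and the comment
 * on 'verified' above; the check itself is an illustration. */
static int gsp_fw_wpr_meta_ready(uint64_t magic, uint64_t revision, uint64_t verified)
{
    return magic    == 0xdc3aae21371a60b3ULL &&
           revision == 1 &&
           verified == 0xa0a0a0a0a0a0a0a0ULL;
}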
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
new file mode 100644 (file)
index 0000000..4eff473
--- /dev/null
@@ -0,0 +1,82 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
+#define __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct {
+    //
+    // Version 1
+    // Version 2
+    // Version 3 = for Partition boot
+    // Version 4 = for eb riscv boot
+    // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
+    //
+    NvU32  version;                         // structure version
+    NvU32  bootloaderOffset;
+    NvU32  bootloaderSize;
+    NvU32  bootloaderParamOffset;
+    NvU32  bootloaderParamSize;
+    NvU32  riscvElfOffset;
+    NvU32  riscvElfSize;
+    NvU32  appVersion;                      // Changelist number associated with the image
+    //
+    // Manifest contains information about Monitor and it is
+    // input to BR
+    //
+    NvU32  manifestOffset;
+    NvU32  manifestSize;
+    //
+    // Monitor Data offset within RISCV image and size
+    //
+    NvU32  monitorDataOffset;
+    NvU32  monitorDataSize;
+    //
+    // Monitor Code offset within RISCV image and size
+    //
+    NvU32  monitorCodeOffset;
+    NvU32  monitorCodeSize;
+    NvU32  bIsMonitorEnabled;
+    //
+    // Swbrom Code offset within RISCV image and size
+    //
+    NvU32  swbromCodeOffset;
+    NvU32  swbromCodeSize;
+    //
+    // Swbrom Data offset within RISCV image and size
+    //
+    NvU32  swbromDataOffset;
+    NvU32  swbromDataSize;
+    //
+    // Total size of FB carveout (image and reserved space).  
+    //
+    NvU32  fbReservedSize;
+    //
+    // Indicates whether the entire RISC-V image is signed as "code" in code section.
+    //
+    NvU32  bSignedAsCode;
+} RM_RISCV_UCODE_DESC;
+
+#endif
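
RM_RISCV_UCODE_DESC locates the pieces of a GSP firmware image as offset/size pairs from the start of the image. A standalone sketch of slicing an image blob with such a descriptor; the helper and its bounds check are assumptions, not code from this patch:

#include <stddef.h>
#include <stdint.h>

struct fw_section {
    const uint8_t *data;
    uint32_t size;
};

/* Return one section of the firmware image, or an empty section if the
 * descriptor points outside the blob. */
static struct fw_section fw_slice(const uint8_t *img, size_t img_size,
                                  uint32_t offset, uint32_t size)
{
    struct fw_section s = { NULL, 0 };

    if (offset <= img_size && size <= img_size - offset) {
        s.data = img + offset;
        s.size = size;
    }
    return s;
}

/* e.g. fw_slice(img, len, desc->bootloaderOffset, desc->bootloaderSize) or
 *      fw_slice(img, len, desc->riscvElfOffset,   desc->riscvElfSize)      */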
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
new file mode 100644 (file)
index 0000000..341ab0d
--- /dev/null
@@ -0,0 +1,100 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
+#define __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum GSP_SEQ_BUF_OPCODE
+{
+    GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
+    GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+    GSP_SEQ_BUF_OPCODE_REG_POLL,
+    GSP_SEQ_BUF_OPCODE_DELAY_US,
+    GSP_SEQ_BUF_OPCODE_REG_STORE,
+    GSP_SEQ_BUF_OPCODE_CORE_RESET,
+    GSP_SEQ_BUF_OPCODE_CORE_START,
+    GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+    GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+} GSP_SEQ_BUF_OPCODE;
+
+#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode)                       \
+    ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE)  ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE)  / sizeof(NvU32)) : \
+     (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
+     (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL)   ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL)   / sizeof(NvU32)) : \
+     (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US)   ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US)   / sizeof(NvU32)) : \
+     (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE)  ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE)  / sizeof(NvU32)) : \
+    /* GSP_SEQ_BUF_OPCODE_CORE_RESET */                                 \
+    /* GSP_SEQ_BUF_OPCODE_CORE_START */                                 \
+    /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */                         \
+    /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */                                \
+    0)
+
+typedef struct
+{
+    NvU32 addr;
+    NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
+
+typedef struct
+{
+    NvU32 addr;
+    NvU32 mask;
+    NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
+
+typedef struct
+{
+    NvU32 addr;
+    NvU32 mask;
+    NvU32 val;
+    NvU32 timeout;
+    NvU32 error;
+} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
+
+typedef struct
+{
+    NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
+
+typedef struct
+{
+    NvU32 addr;
+    NvU32 index;
+} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
+
+typedef struct GSP_SEQUENCER_BUFFER_CMD
+{
+    GSP_SEQ_BUF_OPCODE opCode;
+    union
+    {
+        GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
+        GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
+        GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
+        GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
+        GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
+    } payload;
+} GSP_SEQUENCER_BUFFER_CMD;
+
+#endif
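
A sequencer command buffer is a stream of NvU32s: one opcode dword followed by that opcode's payload dwords, so a walker advances by 1 + GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) per step. A sketch of that walk, assuming the header above and the NvU32 type are available; only the stride rule comes from the header, the rest is illustrative:

#include <string.h>

/* Walk a buffer of 'size_dwords' NvU32s and hand each decoded command to
 * 'handle'.  Returns the number of commands visited, or -1 if a command
 * would run past the end of the buffer. */
static int gsp_seq_walk(const NvU32 *buf, NvU32 size_dwords,
                        void (*handle)(const GSP_SEQUENCER_BUFFER_CMD *))
{
    NvU32 idx = 0;
    int ncmds = 0;

    while (idx < size_dwords) {
        GSP_SEQ_BUF_OPCODE op = (GSP_SEQ_BUF_OPCODE)buf[idx];
        NvU32 payload = GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(op);
        GSP_SEQUENCER_BUFFER_CMD cmd = { .opCode = op };

        if (idx + 1 + payload > size_dwords)
            return -1;
        /* Payload dwords follow the opcode directly in the buffer. */
        memcpy(&cmd.payload, &buf[idx + 1], payload * sizeof(NvU32));
        handle(&cmd);
        idx += 1 + payload;
        ncmds++;
    }
    return ncmds;
}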
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
new file mode 100644 (file)
index 0000000..3144e9b
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_nvidia_generated_g_allclasses_h__
+#define __src_nvidia_generated_g_allclasses_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_EVENT_KERNEL_CALLBACK_EX            (0x0000007e)
+
+#define NV04_DISPLAY_COMMON                      (0x00000073)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
new file mode 100644 (file)
index 0000000..6b89211
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef __src_nvidia_generated_g_chipset_nvoc_h__
+#define __src_nvidia_generated_g_chipset_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct
+{
+    NvU16               deviceID;           // deviceID
+    NvU16               vendorID;           // vendorID
+    NvU16               subdeviceID;        // subsystem deviceID
+    NvU16               subvendorID;        // subsystem vendorID
+    NvU8                revisionID;         // revision ID
+} BUSINFO;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
new file mode 100644 (file)
index 0000000..a5128f0
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __src_nvidia_generated_g_fbsr_nvoc_h__
+#define __src_nvidia_generated_g_fbsr_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define FBSR_TYPE_DMA                                 4   // Copy using DMA. Fastest.
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
new file mode 100644 (file)
index 0000000..5641a21
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __src_nvidia_generated_g_gpu_nvoc_h__
+#define __src_nvidia_generated_g_gpu_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum
+{
+    COMPUTE_BRANDING_TYPE_NONE,
+    COMPUTE_BRANDING_TYPE_TESLA,
+} COMPUTE_BRANDING_TYPE;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
new file mode 100644 (file)
index 0000000..b5ad55f
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef __src_nvidia_generated_g_kernel_channel_nvoc_h__
+#define __src_nvidia_generated_g_kernel_channel_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum {
+    /*!
+     * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+     * kernel CPU-RM clients.
+     */
+    ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+    /*! @brief Error notifier is explicitly not set.
+     *
+     * The corresponding hErrorContext or hEccErrorContext must be
+     * NV01_NULL_OBJECT.
+     */
+    ERROR_NOTIFIER_TYPE_NONE,
+    /*! @brief Error notifier is a ContextDma */
+    ERROR_NOTIFIER_TYPE_CTXDMA,
+    /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+    ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE                       1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER                  0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN                 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL                0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE             3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN     ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE        ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA      ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY      ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE         5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE    ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA  ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY  ERROR_NOTIFIER_TYPE_MEMORY
+
+#endif
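
These NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_* defines use the same HI:LO range convention as the NVOS02 flags earlier in the series, so the privilege level and both notifier types pack into one 32-bit word. As a rough illustration only, assuming this header, the NVIDIA base types, and the illustrative DRF_NUM helper from the nvos.h sketch are in scope, a kernel-privileged channel with a memory error notifier and no ECC notifier would be described as:

/* Sketch only: DRF_NUM is the illustrative helper from the nvos.h example
 * above, not a macro provided by this patch. */
static const NvU32 channel_internal_flags =
    DRF_NUM(NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE,
            NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL) |
    DRF_NUM(NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE,
            ERROR_NOTIFIER_TYPE_MEMORY) |
    DRF_NUM(NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE,
            ERROR_NOTIFIER_TYPE_NONE);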
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
new file mode 100644 (file)
index 0000000..946954a
--- /dev/null
@@ -0,0 +1,119 @@
+#ifndef __src_nvidia_generated_g_kernel_fifo_nvoc_h__
+#define __src_nvidia_generated_g_kernel_fifo_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum
+{
+    /* *************************************************************************
+     * Bug 3820969
+     * THINK BEFORE CHANGING ENUM ORDER HERE.
+     * VGPU-guest uses this same ordering. Because this enum is not versioned,
+     * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+     * ************************************************************************/
+
+    // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
+    ENGINE_INFO_TYPE_ENG_DESC = 0,
+
+    // HW engine ID
+    ENGINE_INFO_TYPE_FIFO_TAG,
+
+    // RM_ENGINE_TYPE_*
+    ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
+
+    //
+    // runlist id (meaning varies by GPU)
+    // Valid only for Esched-driven engines
+    //
+    ENGINE_INFO_TYPE_RUNLIST,
+
+    // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
+    ENGINE_INFO_TYPE_MMU_FAULT_ID,
+
+    // ROBUST_CHANNEL_*
+    ENGINE_INFO_TYPE_RC_MASK,
+
+    // Reset Bit Position. On Ampere, only valid if not _INVALID
+    ENGINE_INFO_TYPE_RESET,
+
+    // Interrupt Bit Position
+    ENGINE_INFO_TYPE_INTR,
+
+    // log2(MC_ENGINE_*)
+    ENGINE_INFO_TYPE_MC,
+
+    // The DEV_TYPE_ENUM for this engine
+    ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
+
+    // The particular instance of this engine type
+    ENGINE_INFO_TYPE_INSTANCE_ID,
+
+    //
+    // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
+    // Valid only for Esched-driven engines
+    //
+    ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
+
+    //
+    // If this entry is a host-driven engine.
+    // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
+    //
+    ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
+
+    //
+    // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
+    // Valid only for Esched-driven engines
+    //
+    ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
+
+    //
+    // The base address for this engine's NV_CHRAM registers. Valid only on
+    // Ampere+
+    //
+    // Valid only for Esched-driven engines
+    //
+    ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
+
+    // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
+    ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+    // Used for iterating the engine info table by the index passed.
+    ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+
+    // Size of FIFO_ENGINE_LIST.engineData
+    ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
+
+    // Input-only parameter for kfifoEngineInfoXlate.
+    ENGINE_INFO_TYPE_PBDMA_ID
+
+    /* *************************************************************************
+     * Bug 3820969
+     * THINK BEFORE CHANGING ENUM ORDER HERE.
+     * VGPU-guest uses this same ordering. Because this enum is not versioned,
+     * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+     * ************************************************************************/
+} ENGINE_INFO_TYPE;
+
+#endif
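
ENGINE_INFO_TYPE names the columns of a per-engine information table, and a lookup translates a (type, value) pair identifying an engine into another column of the same row, which is what the kfifoEngineInfoXlate comment alludes to. A standalone sketch of that translation pattern; the row layout is an illustration, not GSP-RM's actual representation:

#include <stdbool.h>
#include <stdint.h>

#define ENGINE_DATA_COLS 16  /* illustrative column count, one per info type */

struct engine_row {
    uint32_t data[ENGINE_DATA_COLS];
};

/* Find the engine whose 'in_type' column equals 'in_val' and return its
 * 'out_type' column through *out_val. */
static bool engine_info_xlate(const struct engine_row *rows, int nrows,
                              int in_type, uint32_t in_val,
                              int out_type, uint32_t *out_val)
{
    for (int i = 0; i < nrows; i++) {
        if (rows[i].data[in_type] == in_val) {
            *out_val = rows[i].data[out_type];
            return true;
        }
    }
    return false;
}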
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
new file mode 100644 (file)
index 0000000..daabaee
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef __src_nvidia_generated_g_mem_desc_nvoc_h__
+#define __src_nvidia_generated_g_mem_desc_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define ADDR_SYSMEM     1         // System memory (PCI)
+#define ADDR_FBMEM      2         // Frame buffer memory space
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
new file mode 100644 (file)
index 0000000..754c6af
--- /dev/null
@@ -0,0 +1,44 @@
+#ifndef __src_nvidia_generated_g_os_nvoc_h__
+#define __src_nvidia_generated_g_os_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct PACKED_REGISTRY_ENTRY
+{
+    NvU32                   nameOffset;
+    NvU8                    type;
+    NvU32                   data;
+    NvU32                   length;
+} PACKED_REGISTRY_ENTRY;
+
+typedef struct PACKED_REGISTRY_TABLE
+{
+    NvU32                   size;
+    NvU32                   numEntries;
+    PACKED_REGISTRY_ENTRY   entries[0];
+} PACKED_REGISTRY_TABLE;
+
+#endif
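
PACKED_REGISTRY_TABLE is a size-prefixed blob: numEntries fixed-size entries follow the header, and each nameOffset points at a NUL-terminated key name stored after the entry array. A standalone sketch of packing a single DWORD-valued key under those assumptions; the type value of 1 and the exact layout rules are guesses drawn from the structure, not documented here:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    uint32_t nameOffset;   /* offset of the NUL-terminated key name in the blob */
    uint8_t  type;         /* assumed: 1 = DWORD-valued entry */
    uint32_t data;
    uint32_t length;
} entry_t;

typedef struct {
    uint32_t size;         /* total blob size in bytes */
    uint32_t numEntries;
    entry_t  entries[];
} table_t;

/* Pack a single name = value entry; the caller frees the result. */
static table_t *registry_pack_one(const char *name, uint32_t value)
{
    size_t name_len = strlen(name) + 1;
    size_t size = sizeof(table_t) + sizeof(entry_t) + name_len;
    table_t *t = calloc(1, size);

    if (!t)
        return NULL;
    t->size = (uint32_t)size;
    t->numEntries = 1;
    t->entries[0].nameOffset = (uint32_t)(sizeof(table_t) + sizeof(entry_t));
    t->entries[0].type = 1;                  /* assumed DWORD type */
    t->entries[0].data = value;
    t->entries[0].length = sizeof(uint32_t); /* assumed: payload length in bytes */
    memcpy((uint8_t *)t + t->entries[0].nameOffset, name, name_len);
    return t;
}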
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
new file mode 100644 (file)
index 0000000..8d925e2
--- /dev/null
@@ -0,0 +1,124 @@
+#ifndef __src_nvidia_generated_g_rpc_structures_h__
+#define __src_nvidia_generated_g_rpc_structures_h__
+#include <nvrm/535.113.01/nvidia/generated/g_sdk-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct rpc_alloc_memory_v13_01
+{
+    NvHandle   hClient;
+    NvHandle   hDevice;
+    NvHandle   hMemory;
+    NvU32      hClass;
+    NvU32      flags;
+    NvU32      pteAdjust;
+    NvU32      format;
+    NvU64      length NV_ALIGN_BYTES(8);
+    NvU32      pageCount;
+    struct pte_desc pteDesc;
+} rpc_alloc_memory_v13_01;
+
+typedef struct rpc_free_v03_00
+{
+    NVOS00_PARAMETERS_v03_00 params;
+} rpc_free_v03_00;
+
+typedef struct rpc_unloading_guest_driver_v1F_07
+{
+    NvBool     bInPMTransition;
+    NvBool     bGc6Entering;
+    NvU32      newLevel;
+} rpc_unloading_guest_driver_v1F_07;
+
+typedef struct rpc_update_bar_pde_v15_00
+{
+    UpdateBarPde_v15_00 info;
+} rpc_update_bar_pde_v15_00;
+
+typedef struct rpc_gsp_rm_alloc_v03_00
+{
+    NvHandle   hClient;
+    NvHandle   hParent;
+    NvHandle   hObject;
+    NvU32      hClass;
+    NvU32      status;
+    NvU32      paramsSize;
+    NvU32      flags;
+    NvU8       reserved[4];
+    NvU8       params[];
+} rpc_gsp_rm_alloc_v03_00;
+
+typedef struct rpc_gsp_rm_control_v03_00
+{
+    NvHandle   hClient;
+    NvHandle   hObject;
+    NvU32      cmd;
+    NvU32      status;
+    NvU32      paramsSize;
+    NvU32      flags;
+    NvU8       params[];
+} rpc_gsp_rm_control_v03_00;
+
+typedef struct rpc_run_cpu_sequencer_v17_00
+{
+    NvU32      bufferSizeDWord;
+    NvU32      cmdIndex;
+    NvU32      regSaveArea[8];
+    NvU32      commandBuffer[];
+} rpc_run_cpu_sequencer_v17_00;
+
+typedef struct rpc_post_event_v17_00
+{
+    NvHandle   hClient;
+    NvHandle   hEvent;
+    NvU32      notifyIndex;
+    NvU32      data;
+    NvU16      info16;
+    NvU32      status;
+    NvU32      eventDataSize;
+    NvBool     bNotifyList;
+    NvU8       eventData[];
+} rpc_post_event_v17_00;
+
+typedef struct rpc_rc_triggered_v17_02
+{
+    NvU32      nv2080EngineType;
+    NvU32      chid;
+    NvU32      exceptType;
+    NvU32      scope;
+    NvU16      partitionAttributionId;
+} rpc_rc_triggered_v17_02;
+
+typedef struct rpc_os_error_log_v17_00
+{
+    NvU32      exceptType;
+    NvU32      runlistId;
+    NvU32      chid;
+    char       errString[0x100];
+} rpc_os_error_log_v17_00;
+
+#endif
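[Editor's note] Most of these RPC records are a small fixed header followed by a variable-length params blob, with paramsSize naming the blob's length. A hedged illustration of assembling a GSP_RM_ALLOC message (helper name invented; handles and class are whatever the client already owns; the usual kernel memcpy is assumed):

    /* Sketch: fill a GSP_RM_ALLOC message in a caller-provided buffer that
     * is at least sizeof(*rpc) + params_size bytes. */
    static void
    gsp_rm_alloc_fill(rpc_gsp_rm_alloc_v03_00 *rpc, NvHandle client,
                      NvHandle parent, NvHandle object, NvU32 oclass,
                      const void *params, NvU32 params_size)
    {
            rpc->hClient    = client;
            rpc->hParent    = parent;
            rpc->hObject    = object;
            rpc->hClass     = oclass;
            rpc->status     = 0;            /* filled in by the receiver */
            rpc->paramsSize = params_size;
            rpc->flags      = 0;
            memcpy(rpc->params, params, params_size);
    }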
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
new file mode 100644 (file)
index 0000000..e9fed41
--- /dev/null
@@ -0,0 +1,45 @@
+#ifndef __src_nvidia_generated_g_sdk_structures_h__
+#define __src_nvidia_generated_g_sdk_structures_h__
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NVOS00_PARAMETERS_v03_00
+{
+    NvHandle   hRoot;
+    NvHandle   hObjectParent;
+    NvHandle   hObjectOld;
+    NvV32      status;
+} NVOS00_PARAMETERS_v03_00;
+
+typedef struct UpdateBarPde_v15_00
+{
+    NV_RPC_UPDATE_PDE_BAR_TYPE barType;
+    NvU64      entryValue NV_ALIGN_BYTES(8);
+    NvU64      entryLevelShift NV_ALIGN_BYTES(8);
+} UpdateBarPde_v15_00;
+
+#endif
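[Editor's note] NVOS00_PARAMETERS_v03_00 is the payload of the rpc_free_v03_00 message defined in the previous file, so tearing an object down amounts to naming the client/parent/object handle triple. A short sketch (helper name invented):

    static void
    rpc_free_fill(rpc_free_v03_00 *rpc, NvHandle client, NvHandle parent,
                  NvHandle object)
    {
            rpc->params.hRoot         = client;
            rpc->params.hObjectParent = parent;
            rpc->params.hObjectOld    = object;
            rpc->params.status        = 0;  /* presumably written back by GSP-RM */
    }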
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
new file mode 100644 (file)
index 0000000..af50b11
--- /dev/null
@@ -0,0 +1,74 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
+#define __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct DOD_METHOD_DATA
+{
+    NV_STATUS status;
+    NvU32     acpiIdListLen;
+    NvU32     acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+    NV_STATUS status;
+    NvU32     jtCaps;
+    NvU16     jtRevId;
+    NvBool    bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+    NvU32       acpiId;
+    NvU32       mode;
+    NV_STATUS   status;
+} MUX_METHOD_DATA_ELEMENT;
+
+typedef struct MUX_METHOD_DATA
+{
+    NvU32                       tableLen;
+    MUX_METHOD_DATA_ELEMENT     acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+    MUX_METHOD_DATA_ELEMENT     acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+    NV_STATUS status;
+    NvU32     optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+    NvBool                                               bValid;
+    DOD_METHOD_DATA                                      dodMethodData;
+    JT_METHOD_DATA                                       jtMethodData;
+    MUX_METHOD_DATA                                      muxMethodData;
+    CAPS_METHOD_DATA                                     capsMethodData;
+} ACPI_METHOD_DATA;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
new file mode 100644 (file)
index 0000000..e3160c6
--- /dev/null
@@ -0,0 +1,86 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
+#define __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum
+{
+    RM_ENGINE_TYPE_NULL                 =       (0x00000000),
+    RM_ENGINE_TYPE_GR0                  =       (0x00000001),
+    RM_ENGINE_TYPE_GR1                  =       (0x00000002),
+    RM_ENGINE_TYPE_GR2                  =       (0x00000003),
+    RM_ENGINE_TYPE_GR3                  =       (0x00000004),
+    RM_ENGINE_TYPE_GR4                  =       (0x00000005),
+    RM_ENGINE_TYPE_GR5                  =       (0x00000006),
+    RM_ENGINE_TYPE_GR6                  =       (0x00000007),
+    RM_ENGINE_TYPE_GR7                  =       (0x00000008),
+    RM_ENGINE_TYPE_COPY0                =       (0x00000009),
+    RM_ENGINE_TYPE_COPY1                =       (0x0000000a),
+    RM_ENGINE_TYPE_COPY2                =       (0x0000000b),
+    RM_ENGINE_TYPE_COPY3                =       (0x0000000c),
+    RM_ENGINE_TYPE_COPY4                =       (0x0000000d),
+    RM_ENGINE_TYPE_COPY5                =       (0x0000000e),
+    RM_ENGINE_TYPE_COPY6                =       (0x0000000f),
+    RM_ENGINE_TYPE_COPY7                =       (0x00000010),
+    RM_ENGINE_TYPE_COPY8                =       (0x00000011),
+    RM_ENGINE_TYPE_COPY9                =       (0x00000012),
+    RM_ENGINE_TYPE_NVDEC0               =       (0x0000001d),
+    RM_ENGINE_TYPE_NVDEC1               =       (0x0000001e),
+    RM_ENGINE_TYPE_NVDEC2               =       (0x0000001f),
+    RM_ENGINE_TYPE_NVDEC3               =       (0x00000020),
+    RM_ENGINE_TYPE_NVDEC4               =       (0x00000021),
+    RM_ENGINE_TYPE_NVDEC5               =       (0x00000022),
+    RM_ENGINE_TYPE_NVDEC6               =       (0x00000023),
+    RM_ENGINE_TYPE_NVDEC7               =       (0x00000024),
+    RM_ENGINE_TYPE_NVENC0               =       (0x00000025),
+    RM_ENGINE_TYPE_NVENC1               =       (0x00000026),
+    RM_ENGINE_TYPE_NVENC2               =       (0x00000027),
+    RM_ENGINE_TYPE_VP                   =       (0x00000028),
+    RM_ENGINE_TYPE_ME                   =       (0x00000029),
+    RM_ENGINE_TYPE_PPP                  =       (0x0000002a),
+    RM_ENGINE_TYPE_MPEG                 =       (0x0000002b),
+    RM_ENGINE_TYPE_SW                   =       (0x0000002c),
+    RM_ENGINE_TYPE_TSEC                 =       (0x0000002d),
+    RM_ENGINE_TYPE_VIC                  =       (0x0000002e),
+    RM_ENGINE_TYPE_MP                   =       (0x0000002f),
+    RM_ENGINE_TYPE_SEC2                 =       (0x00000030),
+    RM_ENGINE_TYPE_HOST                 =       (0x00000031),
+    RM_ENGINE_TYPE_DPU                  =       (0x00000032),
+    RM_ENGINE_TYPE_PMU                  =       (0x00000033),
+    RM_ENGINE_TYPE_FBFLCN               =       (0x00000034),
+    RM_ENGINE_TYPE_NVJPEG0              =       (0x00000035),
+    RM_ENGINE_TYPE_NVJPEG1              =       (0x00000036),
+    RM_ENGINE_TYPE_NVJPEG2              =       (0x00000037),
+    RM_ENGINE_TYPE_NVJPEG3              =       (0x00000038),
+    RM_ENGINE_TYPE_NVJPEG4              =       (0x00000039),
+    RM_ENGINE_TYPE_NVJPEG5              =       (0x0000003a),
+    RM_ENGINE_TYPE_NVJPEG6              =       (0x0000003b),
+    RM_ENGINE_TYPE_NVJPEG7              =       (0x0000003c),
+    RM_ENGINE_TYPE_OFA                  =       (0x0000003d),
+    RM_ENGINE_TYPE_LAST                 =       (0x0000003e),
+} RM_ENGINE_TYPE;
+
+#endif
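[Editor's note] The copy, NVDEC, NVENC and NVJPEG ranges in this enum are each contiguous, so an engine's RM type can be derived from the range's base value plus the per-type instance number — the same base-plus-instance pattern the r535 copy-engine code later in this series uses with the NV2080 engine-type constants. Trivial sketch (function name is illustrative):

    static RM_ENGINE_TYPE
    rm_engine_type_copy(int inst)
    {
            /* RM_ENGINE_TYPE_COPY0..COPY9 are consecutive (0x09..0x12). */
            return RM_ENGINE_TYPE_COPY0 + inst;
    }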
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
new file mode 100644 (file)
index 0000000..3abec59
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
+#define __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB                  (96 << 10)   // All architectures
+
+#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE      ((48 << 10) * 2048)   // Support 2048 channels
+
+#endif
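[Editor's note] Plugging in the numbers: GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE works out to 48 KiB x 2048 = 96 MiB, and the per-framebuffer term adds 96 KiB per GiB of VRAM. A hedged sketch of combining the two into a heap estimate (the real sizing policy belongs to GSP-RM and likely includes further terms):

    static u64
    gsp_fw_heap_guess(u64 fb_size_bytes)
    {
            u64 fb_gib = DIV_ROUND_UP(fb_size_bytes, 1ULL << 30);

            return GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE +          /* 96 MiB     */
                   fb_gib * GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB;     /* 96 KiB/GiB */
    }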
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
new file mode 100644 (file)
index 0000000..4033a6f
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
+#define __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct {
+    RmPhysAddr sharedMemPhysAddr;
+    NvU32 pageTableEntryCount;
+    NvLength cmdQueueOffset;
+    NvLength statQueueOffset;
+    NvLength locklessCmdQueueOffset;
+    NvLength locklessStatQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+    NvU32 oldLevel;
+    NvU32 flags;
+    NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+    MESSAGE_QUEUE_INIT_ARGUMENTS      messageQueueInitArguments;
+    GSP_SR_INIT_ARGUMENTS             srInitArguments;
+    NvU32                             gpuInstance;
+
+    struct
+    {
+        NvU64                         pa;
+        NvU64                         size;
+    } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#endif
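[Editor's note] These are the arguments cached for GSP-RM at boot: where the shared message-queue region lives, how the command/status queues are laid out inside it, and whether this is a cold boot or a power-management resume (the SR arguments). A minimal fill for the cold-boot case, with offsets treated as placeholders:

    static void
    gsp_args_fill(GSP_ARGUMENTS_CACHED *args, RmPhysAddr shm_pa,
                  NvU32 pte_count, NvLength cmdq_off, NvLength statq_off)
    {
            memset(args, 0, sizeof(*args));
            args->messageQueueInitArguments.sharedMemPhysAddr   = shm_pa;
            args->messageQueueInitArguments.pageTableEntryCount = pte_count;
            args->messageQueueInitArguments.cmdQueueOffset      = cmdq_off;
            args->messageQueueInitArguments.statQueueOffset     = statq_off;
            args->srInitArguments.bInPMTransition = 0;  /* cold boot */
            args->gpuInstance = 0;
    }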
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
new file mode 100644 (file)
index 0000000..eeab25a
--- /dev/null
@@ -0,0 +1,174 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
+#define __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h>
+#include <nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct GSP_VF_INFO
+{
+    NvU32  totalVFs;
+    NvU32  firstVFOffset;
+    NvU64  FirstVFBar0Address;
+    NvU64  FirstVFBar1Address;
+    NvU64  FirstVFBar2Address;
+    NvBool b64bitBar0;
+    NvBool b64bitBar1;
+    NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct GspSMInfo_t
+{
+    NvU32 version;
+    NvU32 regBankCount;
+    NvU32 regBankRegCount;
+    NvU32 maxWarpsPerSM;
+    NvU32 maxThreadsPerWarp;
+    NvU32 geomGsObufEntries;
+    NvU32 geomXbufEntries;
+    NvU32 maxSPPerSM;
+    NvU32 rtCoreCount;
+} GspSMInfo;
+
+typedef struct GspStaticConfigInfo_t
+{
+    NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+    NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+    NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
+    NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
+    NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
+    NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+    NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+    COMPUTE_BRANDING_TYPE computeBranding;
+
+    NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+    NvU32 sriovMaxGfid;
+
+    NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+    GspSMInfo SM_info;
+
+    NvBool poisonFuseEnabled;
+
+    NvU64 fb_length;
+    NvU32 fbio_mask;
+    NvU32 fb_bus_width;
+    NvU32 fb_ram_type;
+    NvU32 fbp_mask;
+    NvU32 l2_cache_size;
+
+    NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+    NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+
+    NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+    NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+    NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+    NvBool bGpuInternalSku;
+    NvBool bIsQuadroGeneric;
+    NvBool bIsQuadroAd;
+    NvBool bIsNvidiaNvs;
+    NvBool bIsVgx;
+    NvBool bGeforceSmb;
+    NvBool bIsTitan;
+    NvBool bIsTesla;
+    NvBool bIsMobile;
+    NvBool bIsGc6Rtd3Allowed;
+    NvBool bIsGcOffRtd3Allowed;
+    NvBool bIsGcoffLegacyAllowed;
+
+    NvU64 bar1PdeBase;
+    NvU64 bar2PdeBase;
+
+    NvBool bVbiosValid;
+    NvU32 vbiosSubVendor;
+    NvU32 vbiosSubDevice;
+
+    NvBool bPageRetirementSupported;
+
+    NvBool bSplitVasBetweenServerClientRm;
+
+    NvBool bClRootportNeedsNosnoopWAR;
+
+    VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+    VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+    NvU64 displaylessMaxPixels;
+
+    // Client handle for internal RMAPI control.
+    NvHandle hInternalClient;
+
+    // Device handle for internal RMAPI control.
+    NvHandle hInternalDevice;
+
+    // Subdevice handle for internal RMAPI control.
+    NvHandle hInternalSubdevice;
+
+    NvBool bSelfHostedMode;
+    NvBool bAtsSupported;
+
+    NvBool bIsGpuUefi;
+} GspStaticConfigInfo;
+
+typedef struct GspSystemInfo
+{
+    NvU64 gpuPhysAddr;
+    NvU64 gpuPhysFbAddr;
+    NvU64 gpuPhysInstAddr;
+    NvU64 nvDomainBusDeviceFunc;
+    NvU64 simAccessBufPhysAddr;
+    NvU64 pcieAtomicsOpMask;
+    NvU64 consoleMemSize;
+    NvU64 maxUserVa;
+    NvU32 pciConfigMirrorBase;
+    NvU32 pciConfigMirrorSize;
+    NvU8 oorArch;
+    NvU64 clPdbProperties;
+    NvU32 Chipset;
+    NvBool bGpuBehindBridge;
+    NvBool bMnocAvailable;
+    NvBool bUpstreamL0sUnsupported;
+    NvBool bUpstreamL1Unsupported;
+    NvBool bUpstreamL1PorSupported;
+    NvBool bUpstreamL1PorMobileOnly;
+    NvU8   upstreamAddressValid;
+    BUSINFO FHBBusInfo;
+    BUSINFO chipsetIDInfo;
+    ACPI_METHOD_DATA acpiMethodData;
+    NvU32 hypervisorType;
+    NvBool bIsPassthru;
+    NvU64 sysTimerOffsetNs;
+    GSP_VF_INFO gspVFInfo;
+} GspSystemInfo;
+
+#endif
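[Editor's note] GspStaticConfigInfo flows from GSP-RM to the host (GPU identity, FB geometry, internal RMAPI handles), while GspSystemInfo flows the other way and describes the host the GPU is plugged into — matching the GET_GSP_STATIC_INFO and GSP_SET_SYSTEM_INFO entries in the RPC function list further below. A heavily hedged sketch of filling the aperture fields from a PCI device; which BAR backs which field is an assumption here, not something this header pins down:

    #include <linux/pci.h>

    static void
    gsp_system_info_bars(GspSystemInfo *info, struct pci_dev *pdev)
    {
            info->gpuPhysAddr   = pci_resource_start(pdev, 0); /* register aperture (assumed) */
            info->gpuPhysFbAddr = pci_resource_start(pdev, 1); /* VRAM aperture (assumed)     */
            info->maxUserVa     = TASK_SIZE;                   /* illustrative                */
    }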
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
new file mode 100644 (file)
index 0000000..bd5e01f
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
+#define __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define MC_ENGINE_IDX_DISP                          2
+
+#define MC_ENGINE_IDX_CE0                           15
+
+#define MC_ENGINE_IDX_CE9                           24
+
+#define MC_ENGINE_IDX_MSENC                         38
+
+#define MC_ENGINE_IDX_MSENC2                        40
+
+#define MC_ENGINE_IDX_GSP                           49
+#define MC_ENGINE_IDX_NVJPG                         50
+#define MC_ENGINE_IDX_NVJPEG                        MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0                       MC_ENGINE_IDX_NVJPEG
+
+#define MC_ENGINE_IDX_NVJPEG7                       57
+
+#define MC_ENGINE_IDX_BSP                           64
+#define MC_ENGINE_IDX_NVDEC                         MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0                        MC_ENGINE_IDX_NVDEC
+
+#define MC_ENGINE_IDX_NVDEC7                        71
+
+#define MC_ENGINE_IDX_OFA0                          80
+
+#define MC_ENGINE_IDX_GR                            82
+#define MC_ENGINE_IDX_GR0                           MC_ENGINE_IDX_GR
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
new file mode 100644 (file)
index 0000000..366447a
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_nvidia_inc_kernel_gpu_nvbitmask_h__
+#define __src_nvidia_inc_kernel_gpu_nvbitmask_h__
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS                32
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX           ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+#endif
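[Editor's note] Plugging in the numbers: RM_ENGINE_TYPE_LAST is 0x3e, so (0x3e - 1)/32 + 1 = 2 and the engineCaps[] array in GspStaticConfigInfo is two 32-bit words. A bit-test helper, under the assumption that capability bits are packed LSB-first in engine-type order (hypothetical helper, not part of the RM SDK):

    static inline NvBool
    nvgpu_engine_cap_test(const NvU32 *caps, RM_ENGINE_TYPE type)
    {
            return (caps[type / NVGPU_ENGINE_CAPS_MASK_BITS] >>
                    (type % NVGPU_ENGINE_CAPS_MASK_BITS)) & 1;
    }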
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
new file mode 100644 (file)
index 0000000..4a850da
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __src_nvidia_inc_kernel_os_nv_memory_type_h__
+#define __src_nvidia_inc_kernel_os_nv_memory_type_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV_MEMORY_WRITECOMBINED      2
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h
new file mode 100644 (file)
index 0000000..73c57f2
--- /dev/null
@@ -0,0 +1,262 @@
+#ifndef __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+#define __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#ifndef X
+#    define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC,
+#    define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+    X(RM, NOP)                             // 0
+    X(RM, SET_GUEST_SYSTEM_INFO)           // 1
+    X(RM, ALLOC_ROOT)                      // 2
+    X(RM, ALLOC_DEVICE)                    // 3 deprecated
+    X(RM, ALLOC_MEMORY)                    // 4
+    X(RM, ALLOC_CTX_DMA)                   // 5
+    X(RM, ALLOC_CHANNEL_DMA)               // 6
+    X(RM, MAP_MEMORY)                      // 7
+    X(RM, BIND_CTX_DMA)                    // 8 deprecated
+    X(RM, ALLOC_OBJECT)                    // 9
+    X(RM, FREE)                            //10
+    X(RM, LOG)                             //11
+    X(RM, ALLOC_VIDMEM)                    //12
+    X(RM, UNMAP_MEMORY)                    //13
+    X(RM, MAP_MEMORY_DMA)                  //14
+    X(RM, UNMAP_MEMORY_DMA)                //15
+    X(RM, GET_EDID)                        //16
+    X(RM, ALLOC_DISP_CHANNEL)              //17
+    X(RM, ALLOC_DISP_OBJECT)               //18
+    X(RM, ALLOC_SUBDEVICE)                 //19
+    X(RM, ALLOC_DYNAMIC_MEMORY)            //20
+    X(RM, DUP_OBJECT)                      //21
+    X(RM, IDLE_CHANNELS)                   //22
+    X(RM, ALLOC_EVENT)                     //23
+    X(RM, SEND_EVENT)                      //24
+    X(RM, REMAPPER_CONTROL)                //25 deprecated
+    X(RM, DMA_CONTROL)                     //26
+    X(RM, DMA_FILL_PTE_MEM)                //27
+    X(RM, MANAGE_HW_RESOURCE)              //28
+    X(RM, BIND_ARBITRARY_CTX_DMA)          //29 deprecated
+    X(RM, CREATE_FB_SEGMENT)               //30
+    X(RM, DESTROY_FB_SEGMENT)              //31
+    X(RM, ALLOC_SHARE_DEVICE)              //32
+    X(RM, DEFERRED_API_CONTROL)            //33
+    X(RM, REMOVE_DEFERRED_API)             //34
+    X(RM, SIM_ESCAPE_READ)                 //35
+    X(RM, SIM_ESCAPE_WRITE)                //36
+    X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA)  //37
+    X(RM, FREE_VIDMEM_VIRT)                //38
+    X(RM, PERF_GET_PSTATE_INFO)            //39  deprecated for vGPU, used by GSP
+    X(RM, PERF_GET_PERFMON_SAMPLE)         //40
+    X(RM, PERF_GET_VIRTUAL_PSTATE_INFO)    //41  deprecated
+    X(RM, PERF_GET_LEVEL_INFO)             //42
+    X(RM, MAP_SEMA_MEMORY)                 //43
+    X(RM, UNMAP_SEMA_MEMORY)               //44
+    X(RM, SET_SURFACE_PROPERTIES)          //45
+    X(RM, CLEANUP_SURFACE)                 //46
+    X(RM, UNLOADING_GUEST_DRIVER)          //47
+    X(RM, TDR_SET_TIMEOUT_STATE)           //48
+    X(RM, SWITCH_TO_VGA)                   //49
+    X(RM, GPU_EXEC_REG_OPS)                //50
+    X(RM, GET_STATIC_INFO)                 //51
+    X(RM, ALLOC_VIRTMEM)                   //52
+    X(RM, UPDATE_PDE_2)                    //53
+    X(RM, SET_PAGE_DIRECTORY)              //54
+    X(RM, GET_STATIC_PSTATE_INFO)          //55
+    X(RM, TRANSLATE_GUEST_GPU_PTES)        //56
+    X(RM, RESERVED_57)                     //57
+    X(RM, RESET_CURRENT_GR_CONTEXT)        //58
+    X(RM, SET_SEMA_MEM_VALIDATION_STATE)   //59
+    X(RM, GET_ENGINE_UTILIZATION)          //60
+    X(RM, UPDATE_GPU_PDES)                 //61
+    X(RM, GET_ENCODER_CAPACITY)            //62
+    X(RM, VGPU_PF_REG_READ32)              //63
+    X(RM, SET_GUEST_SYSTEM_INFO_EXT)       //64
+    X(GSP, GET_GSP_STATIC_INFO)            //65
+    X(RM, RMFS_INIT)                       //66
+    X(RM, RMFS_CLOSE_QUEUE)                //67
+    X(RM, RMFS_CLEANUP)                    //68
+    X(RM, RMFS_TEST)                       //69
+    X(RM, UPDATE_BAR_PDE)                  //70
+    X(RM, CONTINUATION_RECORD)             //71
+    X(RM, GSP_SET_SYSTEM_INFO)             //72
+    X(RM, SET_REGISTRY)                    //73
+    X(GSP, GSP_INIT_POST_OBJGPU)           //74 deprecated
+    X(RM, SUBDEV_EVENT_SET_NOTIFICATION)   //75 deprecated
+    X(GSP, GSP_RM_CONTROL)                 //76
+    X(RM, GET_STATIC_INFO2)                //77
+    X(RM, DUMP_PROTOBUF_COMPONENT)         //78
+    X(RM, UNSET_PAGE_DIRECTORY)            //79
+    X(RM, GET_CONSOLIDATED_STATIC_INFO)    //80
+    X(RM, GMMU_REGISTER_FAULT_BUFFER)      //81 deprecated
+    X(RM, GMMU_UNREGISTER_FAULT_BUFFER)    //82 deprecated
+    X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER)   //83 deprecated
+    X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated
+    X(RM, CTRL_SET_VGPU_FB_USAGE)          //85
+    X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO)    //86
+    X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO)    //87
+    X(RM, CTRL_RESET_CHANNEL)                   //88
+    X(RM, CTRL_RESET_ISOLATED_CHANNEL)          //89
+    X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT)         //90
+    X(RM, CTRL_CLK_GET_EXTENDED_INFO)           //91
+    X(RM, CTRL_PERF_BOOST)                      //92
+    X(RM, CTRL_PERF_VPSTATES_GET_CONTROL)       //93
+    X(RM, CTRL_GET_ZBC_CLEAR_TABLE)             //94
+    X(RM, CTRL_SET_ZBC_COLOR_CLEAR)             //95
+    X(RM, CTRL_SET_ZBC_DEPTH_CLEAR)             //96
+    X(RM, CTRL_GPFIFO_SCHEDULE)                 //97
+    X(RM, CTRL_SET_TIMESLICE)                   //98
+    X(RM, CTRL_PREEMPT)                         //99
+    X(RM, CTRL_FIFO_DISABLE_CHANNELS)           //100
+    X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL)        //101
+    X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL)    //102
+    X(GSP, GSP_RM_ALLOC)                        //103
+    X(RM, CTRL_GET_P2P_CAPS_V2)                 //104
+    X(RM, CTRL_CIPHER_AES_ENCRYPT)              //105
+    X(RM, CTRL_CIPHER_SESSION_KEY)              //106
+    X(RM, CTRL_CIPHER_SESSION_KEY_STATUS)       //107
+    X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES)   //108
+    X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES)    //109
+    X(RM, CTRL_DBG_SET_EXCEPTION_MASK)          //110
+    X(RM, CTRL_GPU_PROMOTE_CTX)                 //111
+    X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND)        //112
+    X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE)    //113
+    X(RM, CTRL_GR_CTXSW_ZCULL_BIND)             //114
+    X(RM, CTRL_GPU_INITIALIZE_CTX)              //115
+    X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES)    //116
+    X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT)          //117
+    X(RM, CTRL_GET_LATEST_ECC_ADDRESSES)        //118
+    X(RM, CTRL_MC_SERVICE_INTERRUPTS)           //119
+    X(RM, CTRL_DMA_SET_DEFAULT_VASPACE)         //120
+    X(RM, CTRL_GET_CE_PCE_MASK)                 //121
+    X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY)       //122
+    X(RM, CTRL_GET_NVLINK_PEER_ID_MASK)         //123
+    X(RM, CTRL_GET_NVLINK_STATUS)               //124
+    X(RM, CTRL_GET_P2P_CAPS)                    //125
+    X(RM, CTRL_GET_P2P_CAPS_MATRIX)             //126
+    X(RM, RESERVED_0)                           //127
+    X(RM, CTRL_RESERVE_PM_AREA_SMPC)            //128
+    X(RM, CTRL_RESERVE_HWPM_LEGACY)             //129
+    X(RM, CTRL_B0CC_EXEC_REG_OPS)               //130
+    X(RM, CTRL_BIND_PM_RESOURCES)               //131
+    X(RM, CTRL_DBG_SUSPEND_CONTEXT)             //132
+    X(RM, CTRL_DBG_RESUME_CONTEXT)              //133
+    X(RM, CTRL_DBG_EXEC_REG_OPS)                //134
+    X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG)          //135
+    X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE)  //136
+    X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137
+    X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG)       //138
+    X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE)  //139
+    X(RM, CTRL_ALLOC_PMA_STREAM)                //140
+    X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT)       //141
+    X(RM, CTRL_FB_GET_INFO_V2)                  //142
+    X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES)     //143
+    X(RM, CTRL_GR_GET_CTX_BUFFER_INFO)          //144
+    X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES)         //145
+    X(RM, CTRL_GPU_EVICT_CTX)                   //146
+    X(RM, CTRL_FB_GET_FS_INFO)                  //147
+    X(RM, CTRL_GRMGR_GET_GR_FS_INFO)            //148
+    X(RM, CTRL_STOP_CHANNEL)                    //149
+    X(RM, CTRL_GR_PC_SAMPLING_MODE)             //150
+    X(RM, CTRL_PERF_RATED_TDP_GET_STATUS)       //151
+    X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL)      //152
+    X(RM, CTRL_FREE_PMA_STREAM)                 //153
+    X(RM, CTRL_TIMER_SET_GR_TICK_FREQ)          //154
+    X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155
+    X(RM, GET_CONSOLIDATED_GR_STATIC_INFO)      //156
+    X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP)   //157
+    X(RM, CTRL_GR_GET_TPC_PARTITION_MODE)       //158
+    X(RM, CTRL_GR_SET_TPC_PARTITION_MODE)       //159
+    X(UVM, UVM_PAGING_CHANNEL_ALLOCATE)         //160
+    X(UVM, UVM_PAGING_CHANNEL_DESTROY)          //161
+    X(UVM, UVM_PAGING_CHANNEL_MAP)              //162
+    X(UVM, UVM_PAGING_CHANNEL_UNMAP)            //163
+    X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM)      //164
+    X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES)      //165
+    X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION)  //166
+    X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL)    //167
+    X(RM, DCE_RM_INIT)                          //168
+    X(RM, REGISTER_VIRTUAL_EVENT_BUFFER)        //169
+    X(RM, CTRL_EVENT_BUFFER_UPDATE_GET)         //170
+    X(RM, GET_PLCABLE_ADDRESS_KIND)             //171
+    X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2)       //172
+    X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM)    //173
+    X(RM, CTRL_GET_MMU_DEBUG_MODE)              //174
+    X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175
+    X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE)        //176
+    X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO)        //177
+    X(RM, DISABLE_CHANNELS)                     //178
+    X(RM, CTRL_FABRIC_MEMORY_DESCRIBE)          //179
+    X(RM, CTRL_FABRIC_MEM_STATS)                //180
+    X(RM, SAVE_HIBERNATION_DATA)                //181
+    X(RM, RESTORE_HIBERNATION_DATA)             //182
+    X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183
+    X(RM, CTRL_EXEC_PARTITIONS_CREATE)          //184
+    X(RM, CTRL_EXEC_PARTITIONS_DELETE)          //185
+    X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN)    //186
+    X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187
+    X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION)  //188
+    X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK)    //189
+    X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER)  //190
+    X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS)          // 191
+    X(RM, CTRL_BUS_SET_P2P_MAPPING)             // 192
+    X(RM, CTRL_BUS_UNSET_P2P_MAPPING)           // 193
+    X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK)    // 194
+    X(RM, CTRL_GPU_MIGRATABLE_OPS)              // 195
+    X(RM, CTRL_GET_TOTAL_HS_CREDITS)            // 196
+    X(RM, CTRL_GET_HS_CREDITS)                  // 197
+    X(RM, CTRL_SET_HS_CREDITS)                  // 198
+    X(RM, CTRL_PM_AREA_PC_SAMPLER)              // 199
+    X(RM, INVALIDATE_TLB)                       // 200
+    X(RM, NUM_FUNCTIONS)                        //END
+#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+};
+#   undef X
+#   undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+#endif
+
+#ifndef E
+#    define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
+#    define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+    E(FIRST_EVENT = 0x1000)                      // 0x1000
+    E(GSP_INIT_DONE)                             // 0x1001
+    E(GSP_RUN_CPU_SEQUENCER)                     // 0x1002
+    E(POST_EVENT)                                // 0x1003
+    E(RC_TRIGGERED)                              // 0x1004
+    E(MMU_FAULT_QUEUED)                          // 0x1005
+    E(OS_ERROR_LOG)                              // 0x1006
+    E(RG_LINE_INTR)                              // 0x1007
+    E(GPUACCT_PERFMON_UTIL_SAMPLES)              // 0x1008
+    E(SIM_READ)                                  // 0x1009
+    E(SIM_WRITE)                                 // 0x100a
+    E(SEMAPHORE_SCHEDULE_CALLBACK)               // 0x100b
+    E(UCODE_LIBOS_PRINT)                         // 0x100c
+    E(VGPU_GSP_PLUGIN_TRIGGERED)                 // 0x100d
+    E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK)       // 0x100e
+    E(PERF_BRIDGELESS_INFO_UPDATE)               // 0x100f
+    E(VGPU_CONFIG)                               // 0x1010
+    E(DISPLAY_MODESET)                           // 0x1011
+    E(EXTDEV_INTR_SERVICE)                       // 0x1012
+    E(NVLINK_INBAND_RECEIVED_DATA_256)           // 0x1013
+    E(NVLINK_INBAND_RECEIVED_DATA_512)           // 0x1014
+    E(NVLINK_INBAND_RECEIVED_DATA_1024)          // 0x1015
+    E(NVLINK_INBAND_RECEIVED_DATA_2048)          // 0x1016
+    E(NVLINK_INBAND_RECEIVED_DATA_4096)          // 0x1017
+    E(TIMED_SEMAPHORE_RELEASE)                   // 0x1018
+    E(NVLINK_IS_GPU_DEGRADED)                    // 0x1019
+    E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK)         // 0x101a
+    E(GSP_SEND_USER_SHARED_DATA)                 // 0x101b
+    E(NVLINK_FAULT_UP)                           // 0x101c
+    E(GSP_LOCKDOWN_NOTICE)                       // 0x101d
+    E(MIG_CI_CONFIG_UPDATE)                      // 0x101e
+    E(NUM_EVENTS)                                // END
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+#   undef E
+#   undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
+
+#endif
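[Editor's note] The function and event lists above are X-macros: with the default X()/E() definitions each row expands into an enumerator, so X(RM, NOP) becomes NV_VGPU_MSG_FUNCTION_NOP and the values line up with the // comments. The same identifiers can also be reused to build side tables; a small hedged example for debug logging (table name invented, only a few entries shown):

    static const char *const rpc_fn_name[] = {
            [NV_VGPU_MSG_FUNCTION_NOP]            = "NOP",
            [NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC]   = "GSP_RM_ALLOC",
            [NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL] = "GSP_RM_CONTROL",
            [NV_VGPU_MSG_FUNCTION_SET_REGISTRY]   = "SET_REGISTRY",
    };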
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
new file mode 100644 (file)
index 0000000..f14b238
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
+#define __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define MAX_GPC_COUNT           32
+
+typedef enum
+{
+    NV_RPC_UPDATE_PDE_BAR_1,
+    NV_RPC_UPDATE_PDE_BAR_2,
+    NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS 
+{
+    NvU32 headIndex;
+    NvU32 maxHResolution;
+    NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS 
+{
+    NvU32 numHeads;
+    NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
new file mode 100644 (file)
index 0000000..7801af2
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
+#define __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+struct pte_desc
+{
+    NvU32 idr:2;
+    NvU32 reserved1:14;
+    NvU32 length:16;
+    union {
+        NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
+        NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
+    } pte_pde[]  NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
+};
+
+#endif
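[Editor's note] pte_desc sits at the end of rpc_alloc_memory_v13_01 from earlier in this series: a 32-bit header (idr selects PTE vs PDE, length presumably counts the pte_pde[] entries) followed by 8-byte-aligned entries. Sizing the descriptor for n entries is then just (sketch, helper name invented):

    static size_t
    pte_desc_bytes(NvU32 n)
    {
            return sizeof(struct pte_desc) + n * sizeof(NvU64);
    }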
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
new file mode 100644 (file)
index 0000000..e6833df
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVRM_NVTYPES_H__
+#define __NVRM_NVTYPES_H__
+
+#define NV_ALIGN_BYTES(a) __attribute__ ((__aligned__(a)))
+#define NV_DECLARE_ALIGNED(f,a) f __attribute__ ((__aligned__(a)))
+
+typedef u32 NvV32;
+
+typedef u8 NvU8;
+typedef u16 NvU16;
+typedef u32 NvU32;
+typedef u64 NvU64;
+
+typedef void* NvP64;
+
+typedef NvU8 NvBool;
+typedef NvU32 NvHandle;
+typedef NvU64 NvLength;
+
+typedef NvU64 RmPhysAddr;
+
+typedef NvU32 NV_STATUS;
+#endif
index 9e878cdc8e38e141ecf551a09f34b3fab90e9cef..479effcf607e261fac73361958a0a855cf90d315 100644 (file)
@@ -27,6 +27,8 @@
 #include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
 
+#include <subdev/gsp.h>
+
 #include <linux/io-mapping.h>
 #include <linux/firmware.h>
 
@@ -2087,7 +2089,8 @@ nouveau_bios_init(struct drm_device *dev)
        int ret;
 
        /* only relevant for PCI devices */
-       if (!dev_is_pci(dev->dev))
+       if (!dev_is_pci(dev->dev) ||
+           nvkm_gsp_rm(nvxx_device(&drm->client.device)->gsp))
                return 0;
 
        if (!NVInitVBIOS(dev))
index 097246e10cdb738b95fd8a81c5479e91a3f72267..14da22fa3b5b72751ad4955c8456ef84e647e347 100644 (file)
@@ -36,6 +36,7 @@ int
 nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct nvif_disp *disp)
 {
        static const struct nvif_mclass disps[] = {
+               { AD102_DISP, 0 },
                { GA102_DISP, 0 },
                { TU102_DISP, 0 },
                { GV100_DISP, 0 },
index 374212da9e959479c656ac36bfaba6a135dc0785..adc60b25f8e6c61e51c7db37805fa24a89378fae 100644 (file)
@@ -112,6 +112,22 @@ nvkm_firmware_put(const struct firmware *fw)
 
 #define nvkm_firmware_mem(p) container_of((p), struct nvkm_firmware, mem.memory)
 
+static struct scatterlist *
+nvkm_firmware_mem_sgl(struct nvkm_memory *memory)
+{
+       struct nvkm_firmware *fw = nvkm_firmware_mem(memory);
+
+       switch (fw->func->type) {
+       case NVKM_FIRMWARE_IMG_DMA: return &fw->mem.sgl;
+       case NVKM_FIRMWARE_IMG_SGT: return  fw->mem.sgt.sgl;
+       default:
+               WARN_ON(1);
+               break;
+       }
+
+       return NULL;
+}
+
 static int
 nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
                      struct nvkm_vma *vma, void *argv, u32 argc)
@@ -120,10 +136,10 @@ nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *v
        struct nvkm_vmm_map map = {
                .memory = &fw->mem.memory,
                .offset = offset,
-               .sgl = &fw->mem.sgl,
+               .sgl = nvkm_firmware_mem_sgl(memory),
        };
 
-       if (WARN_ON(fw->func->type != NVKM_FIRMWARE_IMG_DMA))
+       if (!map.sgl)
                return -ENOSYS;
 
        return nvkm_vmm_map(vmm, vma, argv, argc, &map);
@@ -132,12 +148,15 @@ nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *v
 static u64
 nvkm_firmware_mem_size(struct nvkm_memory *memory)
 {
-       return sg_dma_len(&nvkm_firmware_mem(memory)->mem.sgl);
+       struct scatterlist *sgl = nvkm_firmware_mem_sgl(memory);
+
+       return sgl ? sg_dma_len(sgl) : 0;
 }
 
 static u64
 nvkm_firmware_mem_addr(struct nvkm_memory *memory)
 {
+       BUG_ON(nvkm_firmware_mem(memory)->func->type != NVKM_FIRMWARE_IMG_DMA);
        return nvkm_firmware_mem(memory)->phys;
 }
 
@@ -188,6 +207,12 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw)
                nvkm_memory_unref(&memory);
                dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
                break;
+       case NVKM_FIRMWARE_IMG_SGT:
+               nvkm_memory_unref(&memory);
+               dma_unmap_sgtable(fw->device->dev, &fw->mem.sgt, DMA_TO_DEVICE, 0);
+               sg_free_table(&fw->mem.sgt);
+               vfree(fw->img);
+               break;
        default:
                WARN_ON(1);
                break;
@@ -225,6 +250,49 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
                sg_dma_len(&fw->mem.sgl) = len;
        }
                break;
+       case NVKM_FIRMWARE_IMG_SGT:
+               len = ALIGN(fw->len, PAGE_SIZE);
+
+               fw->img = vmalloc(len);
+               if (fw->img) {
+                       int pages = len >> PAGE_SHIFT;
+                       int ret = 0;
+
+                       memcpy(fw->img, src, fw->len);
+
+                       ret = sg_alloc_table(&fw->mem.sgt, pages, GFP_KERNEL);
+                       if (ret == 0) {
+                               struct scatterlist *sgl;
+                               u8 *data = fw->img;
+                               int i;
+
+                               for_each_sgtable_sg(&fw->mem.sgt, sgl, i) {
+                                       struct page *page = vmalloc_to_page(data);
+
+                                       if (!page) {
+                                               ret = -EFAULT;
+                                               break;
+                                       }
+
+                                       sg_set_page(sgl, page, PAGE_SIZE, 0);
+                                       data += PAGE_SIZE;
+                               }
+
+                               if (ret == 0) {
+                                       ret = dma_map_sgtable(fw->device->dev, &fw->mem.sgt,
+                                                             DMA_TO_DEVICE, 0);
+                               }
+
+                               if (ret)
+                                       sg_free_table(&fw->mem.sgt);
+                       }
+
+                       if (ret) {
+                               vfree(fw->img);
+                               fw->img = NULL;
+                       }
+               }
+               break;
        default:
                WARN_ON(1);
                return -EINVAL;
index c6dfed18f35b4777f36096f733ff19993d2ab001..bfaaff645a3475608fd5626144c02eb1fef54ffd 100644 (file)
@@ -17,6 +17,8 @@ include $(src)/nvkm/engine/msppp/Kbuild
 include $(src)/nvkm/engine/msvld/Kbuild
 include $(src)/nvkm/engine/nvenc/Kbuild
 include $(src)/nvkm/engine/nvdec/Kbuild
+include $(src)/nvkm/engine/nvjpg/Kbuild
+include $(src)/nvkm/engine/ofa/Kbuild
 include $(src)/nvkm/engine/pm/Kbuild
 include $(src)/nvkm/engine/sec/Kbuild
 include $(src)/nvkm/engine/sec2/Kbuild
index 8bf1635ffabc093f8c1fca62c86fd70e71a357ec..165d61fc5d6c55dc056f37b882e65cc77b49527a 100644 (file)
@@ -10,3 +10,5 @@ nvkm-y += nvkm/engine/ce/gv100.o
 nvkm-y += nvkm/engine/ce/tu102.o
 nvkm-y += nvkm/engine/ce/ga100.o
 nvkm-y += nvkm/engine/ce/ga102.o
+
+nvkm-y += nvkm/engine/ce/r535.o
index 315a69f7fdd128d3d57a3de27c97b03bdb9f8630..9427a592bd16c05d38c089bf438020feb2163786 100644 (file)
@@ -21,6 +21,7 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
 #include <subdev/vfn.h>
 
 #include <nvif/class.h>
@@ -88,5 +89,8 @@ int
 ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
             struct nvkm_engine **pengine)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ce_new(&ga100_ce, device, type, inst, pengine);
+
        return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
 }
index 461b73c7e2e0f351506597714acc85cf2f643d5a..ce56ede7c2e9433cdab33040b2998888f2053045 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_engine_func
@@ -41,5 +43,8 @@ int
 ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
             struct nvkm_engine **pengine)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ce_new(&ga102_ce, device, type, inst, pengine);
+
        return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
 }
index 0be72c463b21a2c9f42a6b5a1aec62a4ed9cc456..806a76a72249306f8879da5efebf1b6d5d607a59 100644 (file)
@@ -3,6 +3,9 @@
 #define __NVKM_CE_PRIV_H__
 #include <engine/ce.h>
 
+int r535_ce_new(const struct nvkm_engine_func *, struct nvkm_device *,
+               enum nvkm_subdev_type, int, struct nvkm_engine **);
+
 void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_chan *);
 void gk104_ce_intr(struct nvkm_engine *);
 void gp100_ce_intr(struct nvkm_engine *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
new file mode 100644 (file)
index 0000000..bd0d435
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
+
+struct r535_ce_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_ce_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_ce_obj = {
+       .dtor = r535_ce_obj_dtor,
+};
+
+static int
+r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_ce_obj *obj;
+       NVC0B5_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->version = 1;
+       args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_ce_dtor(struct nvkm_engine *engine)
+{
+       kfree(engine->func);
+       return engine;
+}
+
+int
+r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+           enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+       struct nvkm_engine_func *rm;
+       int nclass, ret;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_ce_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_ce_obj_ctor;
+       }
+
+       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 9563c0175142f6651dadc49d9eade581bfe173ff..7c8647dcb349e1bb8bd225ef7641c1b25dd4fa3c 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_engine_func
@@ -37,5 +39,8 @@ int
 tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
             struct nvkm_engine **pengine)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ce_new(&tu102_ce, device, type, inst, pengine);
+
        return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
 }
index 1c81e5b34d29d871b35431c47b9d1ff651fc5245..31ed3da32fe7e87655a8796cb47b60d86c9213da 100644 (file)
@@ -2408,7 +2408,7 @@ nv162_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu102_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2426,8 +2426,8 @@ nv162_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000001, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000001, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2443,7 +2443,7 @@ nv164_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu102_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2461,8 +2461,8 @@ nv164_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000003, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000003, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2478,7 +2478,7 @@ nv166_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu102_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2496,8 +2496,8 @@ nv166_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000007, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000007, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2513,7 +2513,7 @@ nv167_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu116_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2531,8 +2531,8 @@ nv167_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000001, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000001, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2548,7 +2548,7 @@ nv168_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu116_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2566,8 +2566,8 @@ nv168_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000001, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000001, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2580,6 +2580,7 @@ nv170_chipset = {
        .fault    = { 0x00000001, tu102_fault_new },
        .fb       = { 0x00000001, ga100_fb_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
+       .gsp      = { 0x00000001, ga100_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .mc       = { 0x00000001, ga100_mc_new },
@@ -2591,6 +2592,9 @@ nv170_chipset = {
        .vfn      = { 0x00000001, ga100_vfn_new },
        .ce       = { 0x000003ff, ga100_ce_new },
        .fifo     = { 0x00000001, ga100_fifo_new },
+       .nvdec    = { 0x0000001f, ga100_nvdec_new },
+       .nvjpg    = { 0x00000001, ga100_nvjpg_new },
+       .ofa      = { 0x00000001, ga100_ofa_new },
 };
 
 static const struct nvkm_device_chip
@@ -2619,7 +2623,9 @@ nv172_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2649,7 +2655,9 @@ nv173_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2679,7 +2687,9 @@ nv174_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2709,7 +2719,9 @@ nv176_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2739,7 +2751,139 @@ nv177_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv192_chipset = {
+       .name = "AD102",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv193_chipset = {
+       .name = "AD103",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv194_chipset = {
+       .name = "AD104",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv196_chipset = {
+       .name = "AD106",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv197_chipset = {
+       .name = "AD107",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -3061,6 +3205,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                        case 0x140: device->card_type = GV100; break;
                        case 0x160: device->card_type = TU100; break;
                        case 0x170: device->card_type = GA100; break;
+                       case 0x190: device->card_type = AD100; break;
                        default:
                                break;
                        }
@@ -3163,6 +3308,11 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x174: device->chip = &nv174_chipset; break;
                case 0x176: device->chip = &nv176_chipset; break;
                case 0x177: device->chip = &nv177_chipset; break;
+               case 0x192: device->chip = &nv192_chipset; break;
+               case 0x193: device->chip = &nv193_chipset; break;
+               case 0x194: device->chip = &nv194_chipset; break;
+               case 0x196: device->chip = &nv196_chipset; break;
+               case 0x197: device->chip = &nv197_chipset; break;
                default:
                        if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
                                switch (device->chipset) {
index 24faaac1589178a137c969ed4b7c36b97908d4d3..bf3176bec18a5fbd6e2f42dfe6a751ec7f6a587a 100644 (file)
@@ -43,6 +43,8 @@
 #include <engine/msvld.h>
 #include <engine/nvenc.h>
 #include <engine/nvdec.h>
+#include <engine/nvjpg.h>
+#include <engine/ofa.h>
 #include <engine/pm.h>
 #include <engine/sec.h>
 #include <engine/sec2.h>
index 9b39ec34161501bdcc91514e8ca4ec7b99e82719..7fd4800a876ab77f47ab51913ec841a9f881b639 100644 (file)
@@ -147,6 +147,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
        case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
        case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
        case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
+       case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
        default:
                args->v0.family = 0;
                break;
index e1aecd3fe96c1058d82565122adbaffa96df83b8..e346e924fee8bdb14740daa86614d0540e690a64 100644 (file)
@@ -27,6 +27,9 @@ nvkm-y += nvkm/engine/disp/gp102.o
 nvkm-y += nvkm/engine/disp/gv100.o
 nvkm-y += nvkm/engine/disp/tu102.o
 nvkm-y += nvkm/engine/disp/ga102.o
+nvkm-y += nvkm/engine/disp/ad102.o
+
+nvkm-y += nvkm/engine/disp/r535.o
 
 nvkm-y += nvkm/engine/disp/udisp.o
 nvkm-y += nvkm/engine/disp/uconn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
new file mode 100644 (file)
index 0000000..7f300a7
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "chan.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_disp_func
+ad102_disp = {
+       .uevent = &gv100_disp_chan_uevent,
+       .ramht_size = 0x2000,
+       .root = {  0, 0,AD102_DISP },
+       .user = {
+               {{-1,-1,GV100_DISP_CAPS                  }, gv100_disp_caps_new },
+               {{ 0, 0,GA102_DISP_CURSOR                }, nvkm_disp_chan_new, &gv100_disp_curs },
+               {{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
+               {{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA      }, nvkm_disp_core_new, &gv100_disp_core },
+               {{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA    }, nvkm_disp_wndw_new, &gv100_disp_wndw },
+               {}
+       },
+};
+
+int
+ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+              struct nvkm_disp **pdisp)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
+
+       return -ENODEV;
+}
index 39f7e7ce9f4a2130f555c3372cb0312adb0b4b7f..457ec5db794d0b2842b108626508bcad68daa8cc 100644 (file)
@@ -105,7 +105,7 @@ nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
        struct nvkm_outp *outp;
 
        if (disp->func->fini)
-               disp->func->fini(disp);
+               disp->func->fini(disp, suspend);
 
        list_for_each_entry(outp, &disp->outps, head) {
                if (outp->func->fini)
@@ -137,7 +137,8 @@ nvkm_disp_init(struct nvkm_engine *engine)
         * each output resource to 'fully enabled'.
         */
        list_for_each_entry(ior, &disp->iors, head) {
-               ior->func->power(ior, true, true, true, true, true);
+               if (ior->func->power)
+                       ior->func->power(ior, true, true, true, true, true);
        }
 
        return 0;
@@ -208,6 +209,9 @@ nvkm_disp_dtor(struct nvkm_engine *engine)
                nvkm_head_del(&head);
        }
 
+       if (disp->func->dtor)
+               disp->func->dtor(disp);
+
        return data;
 }
 
index 398336ffb685ae080807a869427be2f97b009e43..02029051015737fb34d2ab9dadf299e7834bcee9 100644 (file)
@@ -22,6 +22,10 @@ struct nvkm_disp_chan {
        u64 push;
 
        u32 suspend_put;
+
+       struct {
+               struct nvkm_gsp_object object;
+       } rm;
 };
 
 int nvkm_disp_core_new(const struct nvkm_oclass *, void *, u32, struct nvkm_object **);
index efe66ba3c61f8e49c6ce920298e8f884ee194044..ab0a85c9243047b91913c8a9205535beb5e01981 100644 (file)
@@ -24,6 +24,7 @@
 #include "head.h"
 #include "ior.h"
 
+#include <subdev/gsp.h>
 #include <subdev/timer.h>
 
 #include <nvif/class.h>
@@ -147,5 +148,8 @@ int
 ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_disp **pdisp)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_disp_new(&ga102_disp, device, type, inst, pdisp);
+
        return nvkm_disp_new_(&ga102_disp, device, type, inst, pdisp);
 }
index b48ead31da30e2617b6a82096f80fd8e59a7f6b3..83a1323600ae788dd917e5e38d8701aeb4150d00 100644 (file)
@@ -1154,7 +1154,7 @@ gf119_disp_intr(struct nvkm_disp *disp)
 }
 
 void
-gf119_disp_fini(struct nvkm_disp *disp)
+gf119_disp_fini(struct nvkm_disp *disp, bool suspend)
 {
        struct nvkm_device *device = disp->engine.subdev.device;
        /* disable all interrupts */
index e1634f7bca56f6b0df1f1320f657614876826d27..cfa3698d3a2fd675e0da00e88979a05e364ffa4a 100644 (file)
@@ -96,7 +96,7 @@ gv100_sor_dp = {
        .watermark = gv100_sor_dp_watermark,
 };
 
-static void
+void
 gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
 {
        struct nvkm_device *device = ior->disp->engine.subdev.device;
@@ -120,7 +120,7 @@ gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 siz
        nvkm_mask(device, 0x6f0100 + hoff, 0x00000001, 0x00000001);
 }
 
-static void
+void
 gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
 {
        struct nvkm_device *device = ior->disp->engine.subdev.device;
@@ -1115,7 +1115,7 @@ gv100_disp_intr(struct nvkm_disp *disp)
 }
 
 void
-gv100_disp_fini(struct nvkm_disp *disp)
+gv100_disp_fini(struct nvkm_disp *disp, bool suspend)
 {
        struct nvkm_device *device = disp->engine.subdev.device;
        nvkm_wr32(device, 0x611db0, 0x00000000);
index 9beb9d1e86334149779941139acef7b6ddb044a1..3ba04bead2f9cfc3b75b62469670c11d3fdc8476 100644 (file)
@@ -187,6 +187,8 @@ int gp100_sor_new(struct nvkm_disp *, int);
 int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
 void gv100_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
 extern const struct nvkm_ior_func_hdmi gv100_sor_hdmi;
+void gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *, int, void *, u32);
+void gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *, int, void *, u32);
 void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
 void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
 void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
index 2d05e2f7e46b8f760c28fe9b999a394a756683e8..03a5f88a4b993c45b02de1df3eaea10515ee21d6 100644 (file)
@@ -1504,7 +1504,7 @@ nv50_disp_intr(struct nvkm_disp *disp)
 }
 
 void
-nv50_disp_fini(struct nvkm_disp *disp)
+nv50_disp_fini(struct nvkm_disp *disp, bool suspend)
 {
        struct nvkm_device *device = disp->engine.subdev.device;
        /* disable all interrupts */
index bfb2a4db8d644730e9f05b42871f043eeaaa95fa..28adc5a30f2f24db05e3294f0ace97f9bb47079f 100644 (file)
@@ -386,7 +386,8 @@ nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
        outp->disp = disp;
        outp->index = index;
        outp->info = *dcbE;
-       outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
+       if (!disp->rm.client.gsp)
+               outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
 
        OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
                       "edid %x bus %d head %x",
index ec5292a8f3c8513782b886aacf60429c84a382a5..a3fd7cb7c4883953f4940ff68ab02b4d4cbc09c9 100644 (file)
@@ -8,6 +8,9 @@ struct nvkm_head;
 struct nvkm_outp;
 struct dcb_output;
 
+int r535_disp_new(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+                 struct nvkm_disp **);
+
 int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                   struct nvkm_disp *);
 int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
@@ -15,9 +18,10 @@ int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvk
 void nvkm_disp_vblank(struct nvkm_disp *, int head);
 
 struct nvkm_disp_func {
+       void (*dtor)(struct nvkm_disp *);
        int (*oneinit)(struct nvkm_disp *);
        int (*init)(struct nvkm_disp *);
-       void (*fini)(struct nvkm_disp *);
+       void (*fini)(struct nvkm_disp *, bool suspend);
        void (*intr)(struct nvkm_disp *);
        void (*intr_error)(struct nvkm_disp *, int chid);
 
@@ -32,7 +36,7 @@ struct nvkm_disp_func {
 
        u16 ramht_size;
 
-       const struct nvkm_sclass root;
+       struct nvkm_sclass root;
 
        struct nvkm_disp_user {
                struct nvkm_sclass base;
@@ -44,7 +48,7 @@ struct nvkm_disp_func {
 
 int nv50_disp_oneinit(struct nvkm_disp *);
 int nv50_disp_init(struct nvkm_disp *);
-void nv50_disp_fini(struct nvkm_disp *);
+void nv50_disp_fini(struct nvkm_disp *, bool suspend);
 void nv50_disp_intr(struct nvkm_disp *);
 extern const struct nvkm_enum nv50_disp_intr_error_type[];
 void nv50_disp_super(struct work_struct *);
@@ -56,12 +60,12 @@ void nv50_disp_super_2_2(struct nvkm_disp *, struct nvkm_head *);
 void nv50_disp_super_3_0(struct nvkm_disp *, struct nvkm_head *);
 
 int gf119_disp_init(struct nvkm_disp *);
-void gf119_disp_fini(struct nvkm_disp *);
+void gf119_disp_fini(struct nvkm_disp *, bool suspend);
 void gf119_disp_intr(struct nvkm_disp *);
 void gf119_disp_super(struct work_struct *);
 void gf119_disp_intr_error(struct nvkm_disp *, int);
 
-void gv100_disp_fini(struct nvkm_disp *);
+void gv100_disp_fini(struct nvkm_disp *, bool suspend);
 void gv100_disp_intr(struct nvkm_disp *);
 void gv100_disp_super(struct work_struct *);
 int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
new file mode 100644 (file)
index 0000000..2980350
--- /dev/null
@@ -0,0 +1,1671 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "chan.h"
+#include "conn.h"
+#include "dp.h"
+#include "head.h"
+#include "ior.h"
+#include "outp.h"
+
+#include <core/ramht.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
+#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h>
+
+#include <linux/acpi.h>
+
+static u64
+r535_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
+{
+       switch (chan->object.oclass & 0xff) {
+       case 0x7d: *psize = 0x10000; return 0x680000;
+       case 0x7e: *psize = 0x01000; return 0x690000 + (chan->head * *psize);
+       case 0x7b: *psize = 0x01000; return 0x6b0000 + (chan->head * *psize);
+       case 0x7a: *psize = 0x01000; return 0x6d8000 + (chan->head * *psize);
+       default:
+               BUG_ON(1);
+               break;
+       }
+
+       return 0ULL;
+}
+
+static void
+r535_chan_intr(struct nvkm_disp_chan *chan, bool en)
+{
+}
+
+static void
+r535_chan_fini(struct nvkm_disp_chan *chan)
+{
+       nvkm_gsp_rm_free(&chan->rm.object);
+}
+
+static int
+r535_chan_push(struct nvkm_disp_chan *chan)
+{
+       struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp;
+       NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       if (chan->memory) {
+               switch (nvkm_memory_target(chan->memory)) {
+               case NVKM_MEM_TARGET_NCOH:
+                       ctrl->addressSpace = ADDR_SYSMEM;
+                       ctrl->cacheSnoop = 0;
+                       break;
+               case NVKM_MEM_TARGET_HOST:
+                       ctrl->addressSpace = ADDR_SYSMEM;
+                       ctrl->cacheSnoop = 1;
+                       break;
+               case NVKM_MEM_TARGET_VRAM:
+                       ctrl->addressSpace = ADDR_FBMEM;
+                       break;
+               default:
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+
+               ctrl->physicalAddr = nvkm_memory_addr(chan->memory);
+               ctrl->limit = nvkm_memory_size(chan->memory) - 1;
+       }
+
+       ctrl->hclass = chan->object.oclass;
+       ctrl->channelInstance = chan->head;
+       ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0;
+
+       return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static int
+r535_curs_init(struct nvkm_disp_chan *chan)
+{
+       NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
+       int ret;
+
+       ret = r535_chan_push(chan);
+       if (ret)
+               return ret;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
+                                    (chan->object.oclass << 16) | chan->head,
+                                    chan->object.oclass, sizeof(*args), &chan->rm.object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->channelInstance = chan->head;
+
+       return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+}
+
+static const struct nvkm_disp_chan_func
+r535_curs_func = {
+       .init = r535_curs_init,
+       .fini = r535_chan_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_curs = {
+       .func = &r535_curs_func,
+       .user = 73,
+};
+
+static int
+r535_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
+{
+       return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
+                                chan->chid.user << 25 |
+                                (chan->disp->rm.client.object.handle & 0x3fff));
+}
+
+static void
+r535_dmac_fini(struct nvkm_disp_chan *chan)
+{
+       struct nvkm_device *device = chan->disp->engine.subdev.device;
+       const u32 uoff = (chan->chid.user - 1) * 0x1000;
+
+       chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
+       r535_chan_fini(chan);
+}
+
+static int
+r535_dmac_init(struct nvkm_disp_chan *chan)
+{
+       NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+       int ret;
+
+       ret = r535_chan_push(chan);
+       if (ret)
+               return ret;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
+                                    (chan->object.oclass << 16) | chan->head,
+                                    chan->object.oclass, sizeof(*args), &chan->rm.object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->channelInstance = chan->head;
+       args->offset = chan->suspend_put;
+
+       return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+}
+
+static int
+r535_dmac_push(struct nvkm_disp_chan *chan, u64 memory)
+{
+       chan->memory = nvkm_umem_search(chan->object.client, memory);
+       if (IS_ERR(chan->memory))
+               return PTR_ERR(chan->memory);
+
+       return 0;
+}
+
+static const struct nvkm_disp_chan_func
+r535_dmac_func = {
+       .push = r535_dmac_push,
+       .init = r535_dmac_init,
+       .fini = r535_dmac_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+       .bind = r535_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_func
+r535_wimm_func = {
+       .push = r535_dmac_push,
+       .init = r535_dmac_init,
+       .fini = r535_dmac_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wimm = {
+       .func = &r535_wimm_func,
+       .user = 33,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wndw = {
+       .func = &r535_dmac_func,
+       .user = 1,
+};
+
+static void
+r535_core_fini(struct nvkm_disp_chan *chan)
+{
+       struct nvkm_device *device = chan->disp->engine.subdev.device;
+
+       chan->suspend_put = nvkm_rd32(device, 0x680000);
+       r535_chan_fini(chan);
+}
+
+static const struct nvkm_disp_chan_func
+r535_core_func = {
+       .push = r535_dmac_push,
+       .init = r535_dmac_init,
+       .fini = r535_core_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+       .bind = r535_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_user
+r535_core = {
+       .func = &r535_core_func,
+       .user = 0,
+};
+
+static int
+r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->brightness = lvl;
+
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_sor_bl_get(struct nvkm_ior *sor)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+       int lvl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       lvl = ctrl->brightness;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return lvl;
+}
+
+static const struct nvkm_ior_func_bl
+r535_sor_bl = {
+       .get = r535_sor_bl_get,
+       .set = r535_sor_bl_set,
+};
+
+static void
+r535_sor_hda_eld(struct nvkm_ior *sor, int head, u8 *data, u8 size)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+       if (WARN_ON(size > sizeof(ctrl->bufferELD)))
+               return;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->numELDSize = size;
+       memcpy(ctrl->bufferELD, data, size);
+       ctrl->maxFreqSupported = 0; //XXX
+       ctrl->ctrl  = NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, PD, TRUE);
+       ctrl->ctrl |= NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, ELDV, TRUE);
+       ctrl->deviceEntry = head;
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hda_hpd(struct nvkm_ior *sor, int head, bool present)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+       if (present)
+               return;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->deviceEntry = head;
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hda
+r535_sor_hda = {
+       .hpd = r535_sor_hda_hpd,
+       .eld = r535_sor_hda_eld,
+};
+
+static void
+r535_sor_dp_audio_mute(struct nvkm_ior *sor, bool mute)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->mute = mute;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *ctrl;
+
+       if (!enable)
+               r535_sor_dp_audio_mute(sor, true);
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->enable = enable;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+
+       if (enable)
+               r535_sor_dp_audio_mute(sor, false);
+}
+
+static void
+r535_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned_pbn)
+{
+       struct nvkm_disp *disp = sor->disp;
+       struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->head = head;
+       ctrl->sorIndex = sor->id;
+       ctrl->dpLink = sor->asy.link == 2;
+       ctrl->bEnableOverride = 1;
+       ctrl->bMST = 1;
+       ctrl->hBlankSym = 0;
+       ctrl->vBlankSym = 0;
+       ctrl->colorFormat = 0;
+       ctrl->bEnableTwoHeadOneOr = 0;
+       ctrl->singleHeadMultistreamMode = 0;
+       ctrl->MST.slotStart = slot;
+       ctrl->MST.slotEnd = slot + slot_nr - 1;
+       ctrl->MST.PBN = pbn;
+       ctrl->MST.Timeslice = aligned_pbn;
+       ctrl->MST.sendACT = 0;
+       ctrl->MST.singleHeadMSTPipeline = 0;
+       ctrl->MST.bEnableAudioOverRightPanel = 0;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static int
+r535_sor_dp_sst(struct nvkm_ior *sor, int head, bool ef,
+               u32 watermark, u32 hblanksym, u32 vblanksym)
+{
+       struct nvkm_disp *disp = sor->disp;
+       struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->head = head;
+       ctrl->sorIndex = sor->id;
+       ctrl->dpLink = sor->asy.link == 2;
+       ctrl->bEnableOverride = 1;
+       ctrl->bMST = 0;
+       ctrl->hBlankSym = hblanksym;
+       ctrl->vBlankSym = vblanksym;
+       ctrl->colorFormat = 0;
+       ctrl->bEnableTwoHeadOneOr = 0;
+       ctrl->SST.bEnhancedFraming = ef;
+       ctrl->SST.tuSize = 64;
+       ctrl->SST.waterMark = watermark;
+       ctrl->SST.bEnableAudioOverRightPanel = 0;
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static const struct nvkm_ior_func_dp
+r535_sor_dp = {
+       .sst = r535_sor_dp_sst,
+       .vcpi = r535_sor_dp_vcpi,
+       .audio = r535_sor_dp_audio,
+};
+
+static void
+r535_sor_hdmi_scdc(struct nvkm_ior *sor, u32 khz, bool support, bool scrambling,
+                  bool scrambling_low_rates)
+{
+       struct nvkm_outp *outp = sor->asy.outp;
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->caps = 0;
+       if (support)
+               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, SCDC_SUPPORTED, TRUE);
+       if (scrambling)
+               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, GT_340MHZ_CLOCK_SUPPORTED, TRUE);
+       if (scrambling_low_rates)
+               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, LTE_340MHZ_SCRAMBLING_SUPPORTED, TRUE);
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_ctrl_audio_mute(struct nvkm_outp *outp, bool mute)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->mute = mute;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_ctrl_audio(struct nvkm_outp *outp, bool enable)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->transmitControl =
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ENABLE, YES) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, OTHER_FRAME, DISABLE) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, SINGLE_FRAME, DISABLE) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ON_HBLANK, DISABLE) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, VIDEO_FMT, SW_CONTROLLED) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, RESERVED_LEGACY_MODE, NO);
+       ctrl->packetSize = 10;
+       ctrl->aPacket[0] = 0x03;
+       ctrl->aPacket[1] = 0x00;
+       ctrl->aPacket[2] = 0x00;
+       ctrl->aPacket[3] = enable ? 0x10 : 0x01;
+       ctrl->aPacket[4] = 0x00;
+       ctrl->aPacket[5] = 0x00;
+       ctrl->aPacket[6] = 0x00;
+       ctrl->aPacket[7] = 0x00;
+       ctrl->aPacket[8] = 0x00;
+       ctrl->aPacket[9] = 0x00;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+       struct nvkm_device *device = sor->disp->engine.subdev.device;
+       const u32 hdmi = head * 0x400;
+
+       r535_sor_hdmi_ctrl_audio(sor->asy.outp, enable);
+       r535_sor_hdmi_ctrl_audio_mute(sor->asy.outp, !enable);
+
+       /* General Control (GCP). */
+       nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
+       nvkm_wr32(device, 0x6f00cc + hdmi, !enable ? 0x00000001 : 0x00000010);
+       nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);
+}
+
+static void
+r535_sor_hdmi_ctrl(struct nvkm_ior *sor, int head, bool enable, u8 max_ac_packet, u8 rekey)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *ctrl;
+
+       if (!enable)
+               return;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->enable = enable;
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hdmi
+r535_sor_hdmi = {
+       .ctrl = r535_sor_hdmi_ctrl,
+       .scdc = r535_sor_hdmi_scdc,
+       /*TODO: SF_USER -> KMS. */
+       .infoframe_avi = gv100_sor_hdmi_infoframe_avi,
+       .infoframe_vsi = gv100_sor_hdmi_infoframe_vsi,
+       .audio = r535_sor_hdmi_audio,
+};
+
+static const struct nvkm_ior_func
+r535_sor = {
+       .hdmi = &r535_sor_hdmi,
+       .dp = &r535_sor_dp,
+       .hda = &r535_sor_hda,
+       .bl = &r535_sor_bl,
+};
+
+static int
+r535_sor_new(struct nvkm_disp *disp, int id)
+{
+       return nvkm_ior_new_(&r535_sor, disp, SOR, id, true/*XXX: hda cap*/);
+}
+
+static int
+r535_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+       *pmask = 0xf;
+       return 4;
+}
+
+static void
+r535_head_vblank_put(struct nvkm_head *head)
+{
+       struct nvkm_device *device = head->disp->engine.subdev.device;
+
+       nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000000);
+}
+
+static void
+r535_head_vblank_get(struct nvkm_head *head)
+{
+       struct nvkm_device *device = head->disp->engine.subdev.device;
+
+       nvkm_wr32(device, 0x611800 + (head->id * 4), 0x00000002);
+       nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000002);
+}
+
+static void
+r535_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
+{
+}
+
+static const struct nvkm_head_func
+r535_head = {
+       .state = r535_head_state,
+       .vblank_get = r535_head_vblank_get,
+       .vblank_put = r535_head_vblank_put,
+};
+
+static struct nvkm_conn *
+r535_conn_new(struct nvkm_disp *disp, u32 id)
+{
+       NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *ctrl;
+       struct nvbios_connE dcbE = {};
+       struct nvkm_conn *conn;
+       int ret, index;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return (void *)ctrl;
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(id);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return (void *)ctrl;
+
+       list_for_each_entry(conn, &disp->conns, head) {
+               if (conn->index == ctrl->data[0].index) {
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+                       return conn;
+               }
+       }
+
+       dcbE.type = ctrl->data[0].type;
+       index = ctrl->data[0].index;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+       ret = nvkm_conn_new(disp, index, &dcbE, &conn);
+       if (ret)
+               return ERR_PTR(ret);
+
+       list_add_tail(&conn->head, &disp->conns);
+       return conn;
+}
+
+static void
+r535_outp_release(struct nvkm_outp *outp)
+{
+       outp->disp->rm.assigned_sors &= ~BIT(outp->ior->id);
+       outp->ior->asy.outp = NULL;
+       outp->ior = NULL;
+}
+
+static int
+r535_outp_acquire(struct nvkm_outp *outp, bool hda)
+{
+       struct nvkm_disp *disp = outp->disp;
+       struct nvkm_ior *ior;
+       NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl;
+       int or;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ctrl->sorExcludeMask = disp->rm.assigned_sors;
+       if (hda)
+               ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) {
+               if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) {
+                       disp->rm.assigned_sors |= BIT(or);
+                       break;
+               }
+       }
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+       if (WARN_ON(or == ARRAY_SIZE(ctrl->sorAssignListWithTag)))
+               return -EINVAL;
+
+       ior = nvkm_ior_find(disp, SOR, or);
+       if (WARN_ON(!ior))
+               return -EINVAL;
+
+       nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior);
+       return 0;
+}
+
+static int
+r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
+{
+       NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->head = head;
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       *displayid = ctrl->displayId;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return 0;
+}
+
+static struct nvkm_ior *
+r535_outp_inherit(struct nvkm_outp *outp)
+{
+       struct nvkm_disp *disp = outp->disp;
+       struct nvkm_head *head;
+       u32 displayid;
+       int ret;
+
+       list_for_each_entry(head, &disp->heads, head) {
+               ret = r535_disp_head_displayid(disp, head->id, &displayid);
+               if (WARN_ON(ret))
+                       return NULL;
+
+               if (displayid == BIT(outp->index)) {
+                       NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+                       u32 id, proto;
+                       struct nvkm_ior *ior;
+
+                       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                                   NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+                                                   sizeof(*ctrl));
+                       if (IS_ERR(ctrl))
+                               return NULL;
+
+                       ctrl->subDeviceInstance = 0;
+                       ctrl->displayId = displayid;
+
+                       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+                       if (IS_ERR(ctrl))
+                               return NULL;
+
+                       id = ctrl->index;
+                       proto = ctrl->protocol;
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+                       ior = nvkm_ior_find(disp, SOR, id);
+                       if (WARN_ON(!ior))
+                               return NULL;
+
+                       switch (proto) {
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+                               ior->arm.proto = TMDS;
+                               ior->arm.link = 1;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+                               ior->arm.proto = TMDS;
+                               ior->arm.link = 2;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+                               ior->arm.proto = TMDS;
+                               ior->arm.link = 3;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+                               ior->arm.proto = DP;
+                               ior->arm.link = 1;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+                               ior->arm.proto = DP;
+                               ior->arm.link = 2;
+                               break;
+                       default:
+                               WARN_ON(1);
+                               return NULL;
+                       }
+
+                       ior->arm.proto_evo = proto;
+                       ior->arm.head = BIT(head->id);
+                       disp->rm.assigned_sors |= BIT(ior->id);
+                       return ior;
+               }
+       }
+
+       return NULL;
+}
+
+static int
+r535_outp_dfp_get_info(struct nvkm_outp *outp)
+{
+       NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(outp->index);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n",
+                  ctrl->displayId, ctrl->flags, ctrl->flags2);
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return 0;
+}
+
+static int
+r535_outp_detect(struct nvkm_outp *outp)
+{
+       NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayMask = BIT(outp->index);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       if (ctrl->displayMask & BIT(outp->index)) {
+               ret = r535_outp_dfp_get_info(outp);
+               if (ret == 0)
+                       ret = 1;
+       } else {
+               ret = 0;
+       }
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+static int
+r535_dp_mst_id_put(struct nvkm_outp *outp, u32 id)
+{
+       NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = id;
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
+{
+       NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       *pid = ctrl->displayIdAssigned;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return 0;
+}
+
+static int
+r535_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4])
+{
+       NV0073_CTRL_DP_LANE_DATA_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_SET_LANE_DATA, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->numLanes = lanes;
+       for (int i = 0; i < lanes; i++)
+               ctrl->data[i] = NVVAL(NV0073_CTRL, DP_LANE_DATA,  PREEMPHASIS, pe[i]) |
+                               NVVAL(NV0073_CTRL, DP_LANE_DATA, DRIVECURRENT, vs[i]);
+
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 link_bw)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_DP_CTRL_PARAMS *ctrl;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ctrl->cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
+                   NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
+                   NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
+       ctrl->data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
+                    NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
+                    NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
+
+       if (mst)
+               ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
+
+       if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+               ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
+
+       if (target == 0 &&
+            (outp->dp.dpcd[DPCD_RC02] & 0x20) &&
+           !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED))
+               ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ret = ctrl->err ? -EIO : 0;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+static int
+r535_dp_train(struct nvkm_outp *outp, bool retrain)
+{
+       for (int target = outp->dp.lttprs; target >= 0; target--) {
+               int ret = r535_dp_train_target(outp, target, outp->dp.lt.mst,
+                                                            outp->dp.lt.nr,
+                                                            outp->dp.lt.bw);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
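
r535_dp_train() issues NV0073_CTRL_CMD_DP_CTRL once per training target, counting down from
outp->dp.lttprs to 0; in DisplayPort terms the non-zero targets are LTTPR PHY repeaters and target 0 is the
sink, so repeaters closest to the source are handled first and the final link segment to the sink last. A
minimal standalone sketch of the ordering, with a hypothetical repeater count:

    #include <stdio.h>

    int main(void)
    {
            int lttprs = 2; /* hypothetical: two LTTPRs between source and sink */

            for (int target = lttprs; target >= 0; target--)
                    printf("train target %d (%s)\n", target,
                           target ? "LTTPR" : "sink");
            return 0;
    }
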
+
+static int
+r535_dp_rates(struct nvkm_outp *outp)
+{
+       NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
+           !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
+               return 0;
+
+       if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+               return -EINVAL;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(outp->index);
+       for (int i = 0; i < outp->dp.rates; i++)
+               ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
+       u8 size = *psize;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ctrl->bAddrOnly = !size;
+       ctrl->cmd = type;
+       if (ctrl->bAddrOnly) {
+               ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
+               ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD,  I2C_MOT, FALSE);
+       }
+       ctrl->addr = addr;
+       ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+       memcpy(ctrl->data, data, size);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       memcpy(data, ctrl->data, size);
+       *psize = ctrl->size;
+       ret = ctrl->replyType;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+static int
+r535_dp_aux_pwr(struct nvkm_outp *outp, bool pu)
+{
+       return 0;
+}
+
+static void
+r535_dp_release(struct nvkm_outp *outp)
+{
+       if (!outp->dp.lt.bw) {
+               if (!WARN_ON(!outp->dp.rates))
+                       outp->dp.lt.bw = outp->dp.rate[0].rate / 27000;
+               else
+                       outp->dp.lt.bw = 0x06;
+       }
+
+       outp->dp.lt.nr = 0;
+
+       r535_dp_train_target(outp, 0, outp->dp.lt.mst, outp->dp.lt.nr, outp->dp.lt.bw);
+       r535_outp_release(outp);
+}
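
In r535_dp_release() above, when no link bandwidth was negotiated the fallback divides the first advertised
rate by 27000 to obtain a DP bandwidth code (a multiple of 270 Mbps), defaulting to 0x06 (1.62 Gbps)
otherwise. This assumes the stored rates are in 10 kbps units, which is consistent with the
rate * 10 / 200 conversion to the DPCD link-rate table in r535_dp_rates(); under that assumption the
arithmetic is as in this standalone sketch:

    #include <stdio.h>

    int main(void)
    {
            /* Assumption: outp->dp.rate[].rate is stored in 10 kbps units. */
            unsigned rate = 162000;         /* 1.62 Gbps */
            unsigned bw   = rate / 27000;   /* DP bandwidth code, 270 Mbps units */

            printf("rate %u -> link_bw 0x%02x\n", rate, bw);   /* prints 0x06 */
            return 0;
    }
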
+
+static int
+r535_dp_acquire(struct nvkm_outp *outp, bool hda)
+{
+       int ret;
+
+       ret = r535_outp_acquire(outp, hda);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static const struct nvkm_outp_func
+r535_dp = {
+       .detect = r535_outp_detect,
+       .inherit = r535_outp_inherit,
+       .acquire = r535_dp_acquire,
+       .release = r535_dp_release,
+       .dp.aux_pwr = r535_dp_aux_pwr,
+       .dp.aux_xfer = r535_dp_aux_xfer,
+       .dp.mst_id_get = r535_dp_mst_id_get,
+       .dp.mst_id_put = r535_dp_mst_id_put,
+       .dp.rates = r535_dp_rates,
+       .dp.train = r535_dp_train,
+       .dp.drive = r535_dp_drive,
+};
+
+static int
+r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
+{
+       NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+       int ret = -E2BIG;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       if (ctrl->bufferSize <= *psize) {
+               memcpy(data, ctrl->edidBuffer, ctrl->bufferSize);
+               *psize = ctrl->bufferSize;
+               ret = 0;
+       }
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+static const struct nvkm_outp_func
+r535_tmds = {
+       .detect = r535_outp_detect,
+       .inherit = r535_outp_inherit,
+       .acquire = r535_outp_acquire,
+       .release = r535_outp_release,
+       .edid_get = r535_tmds_edid_get,
+};
+
+static int
+r535_outp_new(struct nvkm_disp *disp, u32 id)
+{
+       NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+       enum nvkm_ior_proto proto;
+       struct dcb_output dcbE = {};
+       struct nvkm_conn *conn;
+       struct nvkm_outp *outp;
+       u8 locn, link = 0;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(id);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       switch (ctrl->type) {
+       case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE:
+               return 0;
+       case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+               switch (ctrl->protocol) {
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+                       proto = TMDS;
+                       link = 1;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+                       proto = TMDS;
+                       link = 2;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+                       proto = TMDS;
+                       link = 3;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+                       proto = DP;
+                       link = 1;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+                       proto = DP;
+                       link = 2;
+                       break;
+               default:
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+
+               break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       locn = ctrl->location;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+       conn = r535_conn_new(disp, id);
+       if (IS_ERR(conn))
+               return PTR_ERR(conn);
+
+       switch (proto) {
+       case TMDS: dcbE.type = DCB_OUTPUT_TMDS; break;
+       case   DP: dcbE.type = DCB_OUTPUT_DP; break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       dcbE.location = locn;
+       dcbE.connector = conn->index;
+       dcbE.heads = disp->head.mask;
+       dcbE.i2c_index = 0xff;
+       dcbE.link = dcbE.sorconf.link = link;
+
+       if (proto == TMDS) {
+               ret = nvkm_outp_new_(&r535_tmds, disp, id, &dcbE, &outp);
+               if (ret)
+                       return ret;
+       } else {
+               NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+               bool mst, wm;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                           NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->sorIndex = ~0;
+
+               ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+                       dcbE.dpconf.link_bw = 0x06;
+                       break;
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+                       dcbE.dpconf.link_bw = 0x0a;
+                       break;
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+                       dcbE.dpconf.link_bw = 0x14;
+                       break;
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+                       dcbE.dpconf.link_bw = 0x1e;
+                       break;
+               default:
+                       dcbE.dpconf.link_bw = 0x00;
+                       break;
+               }
+
+               mst = ctrl->bIsMultistreamSupported;
+               wm = ctrl->bHasIncreasedWatermarkLimits;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+               if (WARN_ON(!dcbE.dpconf.link_bw))
+                       return -EINVAL;
+
+               dcbE.dpconf.link_nr = 4;
+
+               ret = nvkm_outp_new_(&r535_dp, disp, id, &dcbE, &outp);
+               if (ret)
+                       return ret;
+
+               outp->dp.mst = mst;
+               outp->dp.increased_wm = wm;
+       }
+
+       outp->conn = conn;
+       list_add_tail(&outp->head, &disp->outps);
+       return 0;
+}
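
The max-link-rate switch in r535_outp_new() above produces the usual DisplayPort bandwidth codes for
dcbE.dpconf.link_bw, where the code is the link rate expressed in 270 Mbps units: 0x06 = 1.62, 0x0a = 2.70,
0x14 = 5.40, 0x1e = 8.10 Gbps. A standalone sketch of that relationship:

    #include <stdio.h>

    int main(void)
    {
            /* Bandwidth codes as assigned to dcbE.dpconf.link_bw above. */
            static const unsigned char bw[] = { 0x06, 0x0a, 0x14, 0x1e };

            for (unsigned i = 0; i < sizeof(bw) / sizeof(bw[0]); i++)
                    printf("link_bw 0x%02x -> %d.%02d Gbps\n", bw[i],
                           bw[i] * 27 / 100, bw[i] * 27 % 100);
            return 0;
    }
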
+
+static void
+r535_disp_irq(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+       struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.irq);
+       Nv2080DpIrqNotification *irq = repv;
+
+       if (WARN_ON(repc < sizeof(*irq)))
+               return;
+
+       nvkm_debug(&disp->engine.subdev, "event: dp irq displayId %08x\n", irq->displayId);
+
+       if (irq->displayId)
+               nvkm_event_ntfy(&disp->rm.event, fls(irq->displayId) - 1, NVKM_DPYID_IRQ);
+}
+
+static void
+r535_disp_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+       struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.hpd);
+       Nv2080HotplugNotification *hpd = repv;
+
+       if (WARN_ON(repc < sizeof(*hpd)))
+               return;
+
+       nvkm_debug(&disp->engine.subdev, "event: hpd plug %08x unplug %08x\n",
+                  hpd->plugDisplayMask, hpd->unplugDisplayMask);
+
+       for (int i = 0; i < 31; i++) {
+               u32 mask = 0;
+
+               if (hpd->plugDisplayMask & BIT(i))
+                       mask |= NVKM_DPYID_PLUG;
+               if (hpd->unplugDisplayMask & BIT(i))
+                       mask |= NVKM_DPYID_UNPLUG;
+
+               if (mask)
+                       nvkm_event_ntfy(&disp->rm.event, i, mask);
+       }
+}
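
r535_disp_hpd() fans the per-GPU plug/unplug display masks out into one nvkm_event notification per display
index, while r535_disp_irq() converts the (single-bit) displayId mask to an index with fls() - 1. A
standalone sketch of the mask decoding, with hypothetical mask values:

    #include <stdio.h>

    enum { DPYID_PLUG = 1, DPYID_UNPLUG = 2 };  /* stand-ins for NVKM_DPYID_* */

    int main(void)
    {
            unsigned plug   = 0x00000005;   /* hypothetical: displays 0 and 2 plugged */
            unsigned unplug = 0x00000002;   /* hypothetical: display 1 unplugged */

            for (int i = 0; i < 31; i++) {
                    unsigned mask = 0;

                    if (plug & (1u << i))
                            mask |= DPYID_PLUG;
                    if (unplug & (1u << i))
                            mask |= DPYID_UNPLUG;
                    if (mask)
                            printf("display %d: events 0x%x\n", i, mask);
            }
            return 0;
    }
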
+
+static const struct nvkm_event_func
+r535_disp_event = {
+};
+
+static void
+r535_disp_intr_head_timing(struct nvkm_disp *disp, int head)
+{
+       struct nvkm_subdev *subdev = &disp->engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 stat = nvkm_rd32(device, 0x611c00 + (head * 0x04));
+
+       if (stat & 0x00000002) {
+               nvkm_disp_vblank(disp, head);
+
+               nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000002);
+       }
+}
+
+static irqreturn_t
+r535_disp_intr(struct nvkm_inth *inth)
+{
+       struct nvkm_disp *disp = container_of(inth, typeof(*disp), engine.subdev.inth);
+       struct nvkm_subdev *subdev = &disp->engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       unsigned long mask = nvkm_rd32(device, 0x611ec0) & 0x000000ff;
+       int head;
+
+       for_each_set_bit(head, &mask, 8)
+               r535_disp_intr_head_timing(disp, head);
+
+       return IRQ_HANDLED;
+}
+
+static void
+r535_disp_fini(struct nvkm_disp *disp, bool suspend)
+{
+       if (!disp->engine.subdev.use.enabled)
+               return;
+
+       nvkm_gsp_rm_free(&disp->rm.object);
+
+       if (!suspend) {
+               nvkm_gsp_event_dtor(&disp->rm.irq);
+               nvkm_gsp_event_dtor(&disp->rm.hpd);
+               nvkm_event_fini(&disp->rm.event);
+
+               nvkm_gsp_rm_free(&disp->rm.objcom);
+               nvkm_gsp_device_dtor(&disp->rm.device);
+               nvkm_gsp_client_dtor(&disp->rm.client);
+       }
+}
+
+static int
+r535_disp_init(struct nvkm_disp *disp)
+{
+       int ret;
+
+       ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, disp->func->root.oclass << 16,
+                               disp->func->root.oclass, 0, &disp->rm.object);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int
+r535_disp_oneinit(struct nvkm_disp *disp)
+{
+       struct nvkm_device *device = disp->engine.subdev.device;
+       struct nvkm_gsp *gsp = device->gsp;
+       NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
+       int ret, i;
+
+       /* RAMIN. */
+       ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
+       if (ret)
+               return ret;
+
+       if (WARN_ON(nvkm_memory_target(disp->inst->memory) != NVKM_MEM_TARGET_VRAM))
+               return -EINVAL;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->instMemPhysAddr = nvkm_memory_addr(disp->inst->memory);
+       ctrl->instMemSize = nvkm_memory_size(disp->inst->memory);
+       ctrl->instMemAddrSpace = ADDR_FBMEM;
+       ctrl->instMemCpuCacheAttr = NV_MEMORY_WRITECOMBINED;
+
+       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+       if (ret)
+               return ret;
+
+       /* OBJs. */
+       ret = nvkm_gsp_client_device_ctor(gsp, &disp->rm.client, &disp->rm.device);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0,
+                               &disp->rm.objcom);
+       if (ret)
+               return ret;
+
+       {
+               NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                          NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+                                          sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               disp->wndw.mask = ctrl->windowPresentMask;
+               disp->wndw.nr = fls(disp->wndw.mask);
+               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       }
+
+       /* */
+       {
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+               NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *ctrl;
+               struct nvkm_gsp_object *subdevice = &disp->rm.client.gsp->internal.device.subdevice;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(subdevice,
+                                           NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->status = 0x56; /* NV_ERR_NOT_SUPPORTED */
+
+               {
+                       const guid_t NBCI_DSM_GUID =
+                               GUID_INIT(0xD4A50B75, 0x65C7, 0x46F7,
+                                         0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44);
+                       u64 NBCI_DSM_REV = 0x00000102;
+                       const guid_t NVHG_DSM_GUID =
+                               GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
+                                         0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
+                       u64 NVHG_DSM_REV = 0x00000102;
+                       acpi_handle handle = ACPI_HANDLE(device->dev);
+
+                       if (handle && acpi_has_method(handle, "_DSM")) {
+                               bool nbci = acpi_check_dsm(handle, &NBCI_DSM_GUID, NBCI_DSM_REV,
+                                                          1ULL << 0x00000014);
+                               bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV,
+                                                          1ULL << 0x00000014);
+
+                               printk(KERN_ERR "bl: nbci:%d nvhg:%d\n", nbci, nvhg);
+
+                               if (nbci || nvhg) {
+                                       union acpi_object argv4 = {
+                                               .buffer.type    = ACPI_TYPE_BUFFER,
+                                               .buffer.length  = sizeof(ctrl->backLightData),
+                                               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+                                       }, *obj;
+
+                                       obj = acpi_evaluate_dsm(handle, nbci ? &NBCI_DSM_GUID : &NVHG_DSM_GUID,
+                                                               0x00000102, 0x14, &argv4);
+                                       if (!obj) {
+                                               acpi_handle_info(handle, "failed to evaluate _DSM\n");
+                                       } else {
+                                               printk(KERN_ERR "bl: obj type %d\n", obj->type);
+                                               printk(KERN_ERR "bl: obj len %d\n", obj->package.count);
+
+                                               for (int i = 0; i < obj->package.count; i++) {
+                                                       union acpi_object *elt = &obj->package.elements[i];
+                                                       u32 size;
+
+                                                       if (elt->integer.value & ~0xffffffffULL)
+                                                               size = 8;
+                                                       else
+                                                               size = 4;
+
+                                                       printk(KERN_ERR "elt %03d: type %d size %d\n", i, elt->type, size);
+                                                       memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size);
+                                                       ctrl->backLightDataSize += size;
+                                               }
+
+                                               printk(KERN_ERR "bl: data size %d\n", ctrl->backLightDataSize);
+                                               ctrl->status = 0;
+                                               ACPI_FREE(obj);
+                                       }
+
+                                       kfree(argv4.buffer.pointer);
+                               }
+                       }
+               }
+
+               ret = nvkm_gsp_rm_ctrl_wr(subdevice, ctrl);
+               if (ret)
+                       return ret;
+#endif
+       }
+
+       /* */
+       {
+               NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                           NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ret = nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+               if (ret)
+                       return ret;
+       }
+
+       /* */
+       {
+               NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+                                          NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               disp->head.nr = ctrl->numHeads;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       }
+
+       /* */
+       {
+               NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+                                          NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK,
+                                          sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               disp->head.mask = ctrl->headMask;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+               for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
+                       ret = nvkm_head_new_(&r535_head, disp, i);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       disp->sor.nr = disp->func->sor.cnt(disp, &disp->sor.mask);
+       nvkm_debug(&disp->engine.subdev, "   SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
+       for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
+               ret = disp->func->sor.new(disp, i);
+               if (ret)
+                       return ret;
+       }
+
+       /* */
+       {
+               NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+               unsigned long mask;
+               int i;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+                                          NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               mask = ctrl->displayMask;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+               for_each_set_bit(i, &mask, 32) {
+                       ret = r535_outp_new(disp, i);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000, NV2080_NOTIFIERS_HOTPLUG,
+                                        r535_disp_hpd, &disp->rm.hpd);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0001, NV2080_NOTIFIERS_DP_IRQ,
+                                        r535_disp_irq, &disp->rm.irq);
+       if (ret)
+               return ret;
+
+       /* RAMHT. */
+       ret = nvkm_ramht_new(device, disp->func->ramht_size ? disp->func->ramht_size :
+                            0x1000, 0, disp->inst, &disp->ramht);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_intr_stall(gsp, disp->engine.subdev.type, disp->engine.subdev.inst);
+       if (ret < 0)
+               return ret;
+
+       ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &disp->engine.subdev,
+                           r535_disp_intr, &disp->engine.subdev.inth);
+       if (ret)
+               return ret;
+
+       nvkm_inth_allow(&disp->engine.subdev.inth);
+       return 0;
+}
+
+static void
+r535_disp_dtor(struct nvkm_disp *disp)
+{
+       kfree(disp->func);
+}
+
+int
+r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
+{
+       struct nvkm_disp_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_disp_dtor;
+       rm->oneinit = r535_disp_oneinit;
+       rm->init = r535_disp_init;
+       rm->fini = r535_disp_fini;
+       rm->uevent = hw->uevent;
+       rm->sor.cnt = r535_sor_cnt;
+       rm->sor.new = r535_sor_new;
+       rm->ramht_size = hw->ramht_size;
+
+       rm->root = hw->root;
+
+       for (int i = 0; hw->user[i].ctor; i++) {
+               switch (hw->user[i].base.oclass & 0xff) {
+               case 0x73: rm->user[i] = hw->user[i]; break;
+               case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break;
+               case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break;
+               case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break;
+               case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break;
+               default:
+                       WARN_ON(1);
+                       continue;
+               }
+       }
+
+       ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
+       if (ret)
+               kfree(rm);
+
+       mutex_init(&(*pdisp)->super.mutex); //XXX
+       return ret;
+}
index 19f5d3a6035eb3cc900eb58c7fb33a7c571b2ce2..dcb9f8ba374ca6ba80236b33c2affdd28cabe28e 100644 (file)
@@ -25,6 +25,7 @@
 #include "ior.h"
 
 #include <core/gpuobj.h>
+#include <subdev/gsp.h>
 #include <subdev/timer.h>
 
 #include <nvif/class.h>
@@ -233,5 +234,8 @@ int
 tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_disp **pdisp)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_disp_new(&tu102_disp, device, type, inst, pdisp);
+
        return nvkm_disp_new_(&tu102_disp, device, type, inst, pdisp);
 }
index 104f6ee9ae6d50c17de4f8a237972341699ffd03..2dab6612c4fc84651e8b8d7fa2fd7056c057399a 100644 (file)
 
 #include <nvif/if0011.h>
 
+static int
+nvkm_uconn_uevent_gsp(struct nvkm_object *object, u64 token, u32 bits)
+{
+       union nvif_conn_event_args args;
+
+       args.v0.version = 0;
+       args.v0.types = 0;
+       if (bits & NVKM_DPYID_PLUG)
+               args.v0.types |= NVIF_CONN_EVENT_V0_PLUG;
+       if (bits & NVKM_DPYID_UNPLUG)
+               args.v0.types |= NVIF_CONN_EVENT_V0_UNPLUG;
+       if (bits & NVKM_DPYID_IRQ)
+               args.v0.types |= NVIF_CONN_EVENT_V0_IRQ;
+
+       return object->client->event(token, &args, sizeof(args.v0));
+}
+
 static int
 nvkm_uconn_uevent_aux(struct nvkm_object *object, u64 token, u32 bits)
 {
@@ -78,13 +95,14 @@ static int
 nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
 {
        struct nvkm_conn *conn = nvkm_uconn(object);
-       struct nvkm_device *device = conn->disp->engine.subdev.device;
+       struct nvkm_disp *disp = conn->disp;
+       struct nvkm_device *device = disp->engine.subdev.device;
        struct nvkm_outp *outp;
        union nvif_conn_event_args *args = argv;
        u64 bits = 0;
 
        if (!uevent) {
-               if (conn->info.hpd == DCB_GPIO_UNUSED)
+               if (!disp->rm.client.gsp && conn->info.hpd == DCB_GPIO_UNUSED)
                        return -ENOSYS;
                return 0;
        }
@@ -100,6 +118,15 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
        if (&outp->head == &conn->disp->outps)
                return -EINVAL;
 
+       if (disp->rm.client.gsp) {
+               if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG  ) bits |= NVKM_DPYID_PLUG;
+               if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_DPYID_UNPLUG;
+               if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ   ) bits |= NVKM_DPYID_IRQ;
+
+               return nvkm_uevent_add(uevent, &disp->rm.event, outp->index, bits,
+                                      nvkm_uconn_uevent_gsp);
+       }
+
        if (outp->dp.aux && !outp->info.location) {
                if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG  ) bits |= NVKM_I2C_PLUG;
                if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG;
index d619b40a42c36f0a1b0986921de134732c5dca30..fd5ee9f0af360c93b117ef1c4affe7ac8b55ba48 100644 (file)
@@ -318,14 +318,14 @@ nvkm_falcon_init(struct nvkm_engine *engine)
 }
 
 static void *
-nvkm_falcon_dtor(struct nvkm_engine *engine)
+nvkm_falcon_dtor_engine(struct nvkm_engine *engine)
 {
        return nvkm_falcon(engine);
 }
 
 static const struct nvkm_engine_func
 nvkm_falcon = {
-       .dtor = nvkm_falcon_dtor,
+       .dtor = nvkm_falcon_dtor_engine,
        .oneinit = nvkm_falcon_oneinit,
        .init = nvkm_falcon_init,
        .fini = nvkm_falcon_fini,
index 5a074b9970abe6612e0ee93ddfc32f1939c9701d..aff92848abfee8b346479d896dfcb10ffdc9fbff 100644 (file)
@@ -26,5 +26,7 @@ nvkm-y += nvkm/engine/fifo/tu102.o
 nvkm-y += nvkm/engine/fifo/ga100.o
 nvkm-y += nvkm/engine/fifo/ga102.o
 
+nvkm-y += nvkm/engine/fifo/r535.o
+
 nvkm-y += nvkm/engine/fifo/ucgrp.o
 nvkm-y += nvkm/engine/fifo/uchan.o
index 5db37247dc29b2f1f9062981b2bf535fe2632c69..22443fe4a39ff6e036279dde140932ab26f20a6c 100644 (file)
@@ -210,6 +210,8 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
                                CASE(SEC2  );
                                CASE(NVDEC );
                                CASE(NVENC );
+                               CASE(NVJPG );
+                               CASE(OFA   );
                                default:
                                        WARN_ON(1);
                                        break;
@@ -347,8 +349,14 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
        nvkm_chid_unref(&fifo->cgid);
        nvkm_chid_unref(&fifo->chid);
 
+       mutex_destroy(&fifo->userd.mutex);
+
        nvkm_event_fini(&fifo->nonstall.event);
        mutex_destroy(&fifo->mutex);
+
+       if (fifo->func->dtor)
+               fifo->func->dtor(fifo);
+
        return fifo;
 }
 
@@ -383,5 +391,8 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
        spin_lock_init(&fifo->lock);
        mutex_init(&fifo->mutex);
 
+       INIT_LIST_HEAD(&fifo->userd.list);
+       mutex_init(&fifo->userd.mutex);
+
        return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
 }
index ea53fb3d5d06f1b3fc0f862eb133d70c575dcbe3..814db9daa194c4ece28cd3d7188f917bea7fdfb2 100644 (file)
@@ -156,6 +156,9 @@ nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_c
                atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]);
 
        /* Allocate the HW structures. */
+       if (engn->func->ctor2) {
+               ret = engn->func->ctor2(engn, vctx, chan);
+       } else
        if (engn->func->bind) {
                ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst);
                if (ret == 0 && engn->func->ctor)
index b7c9d6115bce37a6f55ca55ad4bd44467e0626a8..87a62d4ff4bda45ee532b0598bbe99b247566879 100644 (file)
@@ -275,13 +275,17 @@ nvkm_chan_del(struct nvkm_chan **pchan)
        nvkm_gpuobj_del(&chan->cache);
        nvkm_gpuobj_del(&chan->ramfc);
 
-       nvkm_memory_unref(&chan->userd.mem);
-
        if (chan->cgrp) {
-               nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
+               if (!chan->func->id_put)
+                       nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
+               else
+                       chan->func->id_put(chan);
+
                nvkm_cgrp_unref(&chan->cgrp);
        }
 
+       nvkm_memory_unref(&chan->userd.mem);
+
        if (chan->vmm) {
                nvkm_vmm_part(chan->vmm, chan->inst->memory);
                nvkm_vmm_unref(&chan->vmm);
@@ -438,7 +442,32 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
        }
 
        /* Allocate channel ID. */
-       chan->id = nvkm_chid_get(runl->chid, chan);
+       if (!chan->func->id_get) {
+               chan->id = nvkm_chid_get(runl->chid, chan);
+               if (chan->id >= 0) {
+                       if (func->userd->bar < 0) {
+                               if (ouserd + chan->func->userd->size >=
+                                       nvkm_memory_size(userd)) {
+                                       RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+                                       return -EINVAL;
+                               }
+
+                               ret = nvkm_memory_kmap(userd, &chan->userd.mem);
+                               if (ret) {
+                                       RUNL_DEBUG(runl, "userd %d", ret);
+                                       return ret;
+                               }
+
+                               chan->userd.base = ouserd;
+                       } else {
+                               chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
+                               chan->userd.base = chan->id * chan->func->userd->size;
+                       }
+               }
+       } else {
+               chan->id = chan->func->id_get(chan, userd, ouserd);
+       }
+
        if (chan->id < 0) {
                RUNL_ERROR(runl, "!chids");
                return -ENOSPC;
@@ -448,24 +477,6 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
                cgrp->id = chan->id;
 
        /* Initialise USERD. */
-       if (func->userd->bar < 0) {
-               if (ouserd + chan->func->userd->size >= nvkm_memory_size(userd)) {
-                       RUNL_DEBUG(runl, "ouserd %llx", ouserd);
-                       return -EINVAL;
-               }
-
-               ret = nvkm_memory_kmap(userd, &chan->userd.mem);
-               if (ret) {
-                       RUNL_DEBUG(runl, "userd %d", ret);
-                       return ret;
-               }
-
-               chan->userd.base = ouserd;
-       } else {
-               chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
-               chan->userd.base = chan->id * chan->func->userd->size;
-       }
-
        if (chan->func->userd->clear)
                chan->func->userd->clear(chan);
 
index 85b94f699128d3db025ff2042b45d1293bd55a92..013682a709d56e3901a09a4f89f841e8d84145d8 100644 (file)
@@ -17,6 +17,9 @@ struct nvkm_cctx {
 };
 
 struct nvkm_chan_func {
+       int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd);
+       void (*id_put)(struct nvkm_chan *);
+
        const struct nvkm_chan_func_inst {
                u32 size;
                bool zero;
index c56d2a839efbaffdb6c0a01843973863b56c30de..c8ce7ff187135b0992b52a3c62d8a48593b2e625 100644 (file)
@@ -27,6 +27,7 @@
 #include "runq.h"
 
 #include <core/gpuobj.h>
+#include <subdev/gsp.h>
 #include <subdev/top.h>
 #include <subdev/vfn.h>
 
@@ -607,5 +608,8 @@ int
 ga100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fifo **pfifo)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fifo_new(&ga100_fifo, device, type, inst, pfifo);
+
        return nvkm_fifo_new_(&ga100_fifo, device, type, inst, pfifo);
 }
index 2cdf5da339b60bc609a8ebd1d45160dd2722430b..755235f55b3aca564d7d182b152251f53d131048 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_fifo_func
@@ -34,12 +36,15 @@ ga102_fifo = {
        .engn = &ga100_engn,
        .engn_ce = &ga100_engn_ce,
        .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A  }, &ga100_cgrp, .force = true },
-       .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_B }, &ga100_chan },
+       .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_A }, &ga100_chan },
 };
 
 int
 ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fifo **pfifo)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fifo_new(&ga102_fifo, device, type, inst, pfifo);
+
        return nvkm_fifo_new_(&ga102_fifo, device, type, inst, pfifo);
 }
index 4d448be19224a8a0f86e7bfe08eaa66b3a70e117..a0f3277605a5cf4e21d6501e23c07b4704c4be07 100644 (file)
@@ -13,6 +13,8 @@ struct nvkm_runq;
 struct nvkm_vctx;
 
 struct nvkm_fifo_func {
+       void (*dtor)(struct nvkm_fifo *);
+
        int (*chid_nr)(struct nvkm_fifo *);
        int (*chid_ctor)(struct nvkm_fifo *, int nr);
        int (*runq_nr)(struct nvkm_fifo *);
@@ -58,6 +60,8 @@ struct nvkm_fifo_func {
        } chan;
 };
 
+int r535_fifo_new(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+                 struct nvkm_fifo **);
 int nvkm_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                   struct nvkm_fifo **);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
new file mode 100644 (file)
index 0000000..3adbb05
--- /dev/null
@@ -0,0 +1,664 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+
+#include <core/gpuobj.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+#include <engine/gr.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+
+static u32
+r535_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+       return (chan->cgrp->runl->id << 16) | chan->id;
+}
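
r535_chan_doorbell_handle() packs the runlist ID into the upper 16 bits of the doorbell token and the
channel ID into the lower 16. A standalone sketch of the packing and its inverse, with hypothetical IDs:

    #include <stdio.h>

    int main(void)
    {
            unsigned runl_id = 3, chan_id = 42;             /* hypothetical IDs */
            unsigned token   = (runl_id << 16) | chan_id;   /* as in r535_chan_doorbell_handle() */

            printf("token %08x -> runl %u chan %u\n", token, token >> 16, token & 0xffff);
            return 0;
    }
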
+
+static void
+r535_chan_stop(struct nvkm_chan *chan)
+{
+}
+
+static void
+r535_chan_start(struct nvkm_chan *chan)
+{
+}
+
+static void
+r535_chan_ramfc_clear(struct nvkm_chan *chan)
+{
+       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+
+       nvkm_gsp_rm_free(&chan->rm.object);
+
+       dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size,
+                         chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr);
+
+       nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx);
+}
+
+#define CHID_PER_USERD 8
+
+static int
+r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+       struct nvkm_engn *engn;
+       struct nvkm_device *device = fifo->engine.subdev.device;
+       NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+       const int userd_p = chan->id / CHID_PER_USERD;
+       const int userd_i = chan->id % CHID_PER_USERD;
+       u32 eT = ~0;
+       int ret;
+
+       if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
+               ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
+               if (ret)
+                       return ret;
+       }
+
+       nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+               eT = engn->id;
+               break;
+       }
+
+       if (WARN_ON(eT == ~0))
+               return -EINVAL;
+
+       chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
+                                                 fifo->rm.mthdbuf_size,
+                                                 &chan->rm.mthdbuf.addr, GFP_KERNEL);
+       if (!chan->rm.mthdbuf.ptr)
+               return -ENOMEM;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
+                                    fifo->func->chan.user.oclass, sizeof(*args),
+                                    &chan->rm.object);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->gpFifoOffset = offset;
+       args->gpFifoEntries = length / 8;
+
+       args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+       args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+       args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
+       if (!priv)
+               args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+       else
+               args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+       args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+       args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+       args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+       args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+       args->hVASpace = chan->vmm->rm.object.handle;
+       args->engineType = eT;
+
+       args->instanceMem.base = chan->inst->addr;
+       args->instanceMem.size = chan->inst->size;
+       args->instanceMem.addressSpace = 2;
+       args->instanceMem.cacheAttrib = 1;
+
+       args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+       args->userdMem.size = fifo->func->chan.func->userd->size;
+       args->userdMem.addressSpace = 2;
+       args->userdMem.cacheAttrib = 1;
+
+       args->ramfcMem.base = chan->inst->addr + 0;
+       args->ramfcMem.size = 0x200;
+       args->ramfcMem.addressSpace = 2;
+       args->ramfcMem.cacheAttrib = 1;
+
+       args->mthdbufMem.base = chan->rm.mthdbuf.addr;
+       args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+       args->mthdbufMem.addressSpace = 1;
+       args->mthdbufMem.cacheAttrib = 0;
+
+       if (!priv)
+               args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+       else
+               args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+       args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+       args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+       ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+       if (ret)
+               return ret;
+
+       if (1) {
+               NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl;
+
+               if (1) {
+                       NVA06F_CTRL_BIND_PARAMS *ctrl;
+
+                       ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+                                                   NVA06F_CTRL_CMD_BIND, sizeof(*ctrl));
+                       if (WARN_ON(IS_ERR(ctrl)))
+                               return PTR_ERR(ctrl);
+
+                       ctrl->engineType = eT;
+
+                       ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+                       if (ret)
+                               return ret;
+               }
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+                                           NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl));
+               if (WARN_ON(IS_ERR(ctrl)))
+                       return PTR_ERR(ctrl);
+
+               ctrl->bEnable = 1;
+               ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+       }
+
+       return ret;
+}
+
+static const struct nvkm_chan_func_ramfc
+r535_chan_ramfc = {
+       .write = r535_chan_ramfc_write,
+       .clear = r535_chan_ramfc_clear,
+       .devm = 0xfff,
+       .priv = true,
+};
+
+struct r535_chan_userd {
+       struct nvkm_memory *mem;
+       struct nvkm_memory *map;
+       int chid;
+       u32 used;
+
+       struct list_head head;
+} *userd;
+
+static void
+r535_chan_id_put(struct nvkm_chan *chan)
+{
+       struct nvkm_runl *runl = chan->cgrp->runl;
+       struct nvkm_fifo *fifo = runl->fifo;
+       struct r535_chan_userd *userd;
+
+       mutex_lock(&fifo->userd.mutex);
+       list_for_each_entry(userd, &fifo->userd.list, head) {
+               if (userd->map == chan->userd.mem) {
+                       u32 chid = chan->userd.base / chan->func->userd->size;
+
+                       userd->used &= ~BIT(chid);
+                       if (!userd->used) {
+                               nvkm_memory_unref(&userd->map);
+                               nvkm_memory_unref(&userd->mem);
+                               nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
+                               list_del(&userd->head);
+                       }
+
+                       break;
+               }
+       }
+       mutex_unlock(&fifo->userd.mutex);
+}
+
+static int
+r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
+{
+       const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
+       struct nvkm_runl *runl = chan->cgrp->runl;
+       struct nvkm_fifo *fifo = runl->fifo;
+       struct r535_chan_userd *userd;
+       u32 chid;
+       int ret;
+
+       if (ouserd + chan->func->userd->size >= userd_size ||
+           (ouserd & (chan->func->userd->size - 1))) {
+               RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+               return -EINVAL;
+       }
+
+       chid = div_u64(ouserd, chan->func->userd->size);
+
+       list_for_each_entry(userd, &fifo->userd.list, head) {
+               if (userd->mem == muserd) {
+                       if (userd->used & BIT(chid))
+                               return -EBUSY;
+                       break;
+               }
+       }
+
+       if (&userd->head == &fifo->userd.list) {
+               if (nvkm_memory_size(muserd) < userd_size) {
+                       RUNL_DEBUG(runl, "userd too small");
+                       return -EINVAL;
+               }
+
+               userd = kzalloc(sizeof(*userd), GFP_KERNEL);
+               if (!userd)
+                       return -ENOMEM;
+
+               userd->chid = nvkm_chid_get(runl->chid, chan);
+               if (userd->chid < 0) {
+                       ret = userd->chid;
+                       kfree(userd);
+                       return ret;
+               }
+
+               userd->mem = nvkm_memory_ref(muserd);
+
+               ret = nvkm_memory_kmap(userd->mem, &userd->map);
+               if (ret) {
+                       nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
+                       kfree(userd);
+                       return ret;
+               }
+
+               list_add(&userd->head, &fifo->userd.list);
+       }
+
+       userd->used |= BIT(chid);
+
+       chan->userd.mem = nvkm_memory_ref(userd->map);
+       chan->userd.base = ouserd;
+
+       return (userd->chid * CHID_PER_USERD) + chid;
+}
+
+static int
+r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
+{
+       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+       int ret;
+
+       mutex_lock(&fifo->userd.mutex);
+       ret = r535_chan_id_get_locked(chan, muserd, ouserd);
+       mutex_unlock(&fifo->userd.mutex);
+       return ret;
+}
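
With GSP-RM, each USERD allocation is shared by CHID_PER_USERD (8) channels: the slot within the block is
ouserd / userd_size, the block itself consumes one entry from the channel-ID space (userd->chid), and the
channel ID handed back is userd->chid * CHID_PER_USERD + slot. r535_chan_ramfc_write() recovers the pair
again as chan->id / CHID_PER_USERD and chan->id % CHID_PER_USERD. A standalone sketch of that arithmetic,
with hypothetical sizes and offsets:

    #include <stdio.h>

    #define CHID_PER_USERD 8

    int main(void)
    {
            unsigned userd_size = 0x200;            /* hypothetical per-channel USERD size */
            unsigned block_chid = 5;                /* hypothetical id allocated for the block */
            unsigned ouserd     = 3 * userd_size;   /* offset of this channel within the block */

            unsigned slot = ouserd / userd_size;
            unsigned chid = block_chid * CHID_PER_USERD + slot;

            printf("block %u slot %u -> channel id %u\n", block_chid, slot, chid);  /* 43 */
            return 0;
    }
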
+
+static const struct nvkm_chan_func
+r535_chan = {
+       .id_get = r535_chan_id_get,
+       .id_put = r535_chan_id_put,
+       .inst = &gf100_chan_inst,
+       .userd = &gv100_chan_userd,
+       .ramfc = &r535_chan_ramfc,
+       .start = r535_chan_start,
+       .stop = r535_chan_stop,
+       .doorbell_handle = r535_chan_doorbell_handle,
+};
+
+static const struct nvkm_cgrp_func
+r535_cgrp = {
+};
+
+static int
+r535_engn_nonstall(struct nvkm_engn *engn)
+{
+       struct nvkm_subdev *subdev = &engn->engine->subdev;
+       int ret;
+
+       ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
+       WARN_ON(ret < 0);
+       return ret;
+}
+
+static const struct nvkm_engn_func
+r535_ce = {
+       .nonstall = r535_engn_nonstall,
+};
+
+static int
+r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+       /* RM requires GR context buffers to remain mapped until after the
+        * channel has been destroyed (as opposed to after the last gr obj
+        * has been deleted).
+        *
+        * Take an extra ref here, which will be released once the channel
+        * object has been deleted.
+        */
+       refcount_inc(&vctx->refs);
+       chan->rm.grctx = vctx;
+       return 0;
+}
+
+static const struct nvkm_engn_func
+r535_gr = {
+       .nonstall = r535_engn_nonstall,
+       .ctor2 = r535_gr_ctor,
+};
+
+static int
+r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+       struct nvkm_gsp_client *client = &chan->vmm->rm.client;
+       NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice,
+                                   NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->hClient = client->object.handle;
+       ctrl->hObject = chan->rm.object.handle;
+       ctrl->hChanClient = client->object.handle;
+       ctrl->virtAddress = vctx->vma->addr;
+       ctrl->size = vctx->inst->size;
+       ctrl->engineType = engn->id;
+       ctrl->ChID = chan->id;
+
+       return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl);
+}
+
+static int
+r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+       int ret;
+
+       if (WARN_ON(!engn->rm.size))
+               return -EINVAL;
+
+       ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL,
+                             &vctx->inst);
+       if (ret)
+               return ret;
+
+       ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
+       if (ret)
+               return ret;
+
+       ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
+       if (ret)
+               return ret;
+
+       return r535_flcn_bind(engn, vctx, chan);
+}
+
+static const struct nvkm_engn_func
+r535_flcn = {
+       .nonstall = r535_engn_nonstall,
+       .ctor2 = r535_flcn_ctor,
+};
+
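+/* Runlist blocking/allowing is handled by GSP-RM, so these are no-ops on the host. */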
+static void
+r535_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+static void
+r535_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+static const struct nvkm_runl_func
+r535_runl = {
+       .block = r535_runl_block,
+       .allow = r535_runl_allow,
+};
+
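+/* Map an NVKM engine type/instance to the NV2080_ENGINE_TYPE value RM expects. */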
+static int
+r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
+{
+       switch (type) {
+       case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
+       case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
+       case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
+       case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
+       case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
+       case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
+       case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
+       case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
+       default:
+               break;
+       }
+
+       WARN_ON(1);
+       return -EINVAL;
+}
+
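+/* Translate an RM_ENGINE_TYPE to an NVKM subdev type, returning the instance index. */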
+static int
+r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
+{
+       switch (rm) {
+       case RM_ENGINE_TYPE_GR0:
+               *ptype = NVKM_ENGINE_GR;
+               return 0;
+       case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
+               *ptype = NVKM_ENGINE_CE;
+               return rm - RM_ENGINE_TYPE_COPY0;
+       case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
+               *ptype = NVKM_ENGINE_NVDEC;
+               return rm - RM_ENGINE_TYPE_NVDEC0;
+       case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
+               *ptype = NVKM_ENGINE_NVENC;
+               return rm - RM_ENGINE_TYPE_NVENC0;
+       case RM_ENGINE_TYPE_SW:
+               *ptype = NVKM_ENGINE_SW;
+               return 0;
+       case RM_ENGINE_TYPE_SEC2:
+               *ptype = NVKM_ENGINE_SEC2;
+               return 0;
+       case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
+               *ptype = NVKM_ENGINE_NVJPG;
+               return rm - RM_ENGINE_TYPE_NVJPEG0;
+       case RM_ENGINE_TYPE_OFA:
+               *ptype = NVKM_ENGINE_OFA;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
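+/* Record the context buffer size RM reports for each constructed falcon engine. */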
+static int
+r535_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+       NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+       struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+       struct nvkm_runl *runl;
+       struct nvkm_engn *engn;
+
+       ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                  NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO,
+                                  sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+               nvkm_runl_foreach(runl, fifo) {
+                       nvkm_runl_foreach_engn(engn, runl) {
+                               if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+                                       engn->rm.size =
+                                               ctrl->constructedFalconsTable[i].ctxBufferSize;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       return 0;
+}
+
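+/* Construct runlists and engines from the device info table reported by GSP-RM. */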
+static int
+r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+       struct nvkm_subdev *subdev = &fifo->engine.subdev;
+       struct nvkm_gsp *gsp = subdev->device->gsp;
+       struct nvkm_runl *runl;
+       struct nvkm_engn *engn;
+       u32 cgids = 2048;
+       u32 chids = 2048 / CHID_PER_USERD;
+       int ret;
+       NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
+
+       if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
+           (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
+               return ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                  NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       for (int i = 0; i < ctrl->numEntries; i++) {
+               const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+               const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+
+               runl = nvkm_runl_get(fifo, id, addr);
+               if (!runl) {
+                       runl = nvkm_runl_new(fifo, id, addr, 0);
+                       if (WARN_ON(IS_ERR(runl)))
+                               continue;
+               }
+       }
+
+       for (int i = 0; i < ctrl->numEntries; i++) {
+               const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+               const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
+               const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+               enum nvkm_subdev_type type;
+               int inst, nv2080;
+
+               runl = nvkm_runl_get(fifo, id, addr);
+               if (!runl)
+                       continue;
+
+               inst = r535_fifo_engn_type(rmid, &type);
+               if (inst < 0) {
+                       nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
+                       nvkm_runl_del(runl);
+                       continue;
+               }
+
+               nv2080 = r535_fifo_2080_type(type, inst);
+               if (nv2080 < 0) {
+                       nvkm_runl_del(runl);
+                       continue;
+               }
+
+               switch (type) {
+               case NVKM_ENGINE_CE:
+                       engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
+                       break;
+               case NVKM_ENGINE_GR:
+                       engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
+                       break;
+               case NVKM_ENGINE_NVDEC:
+               case NVKM_ENGINE_NVENC:
+               case NVKM_ENGINE_NVJPG:
+               case NVKM_ENGINE_OFA:
+                       engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst);
+                       break;
+               case NVKM_ENGINE_SW:
+                       continue;
+               default:
+                       engn = NULL;
+                       break;
+               }
+
+               if (!engn) {
+                       nvkm_runl_del(runl);
+                       continue;
+               }
+
+               engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC];
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+
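+       /* Query RM for the size of the CE fault method buffer allocated per-channel. */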
+       {
+               NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                          NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE,
+                                          sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               fifo->rm.mthdbuf_size = ctrl->size;
+
+               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       }
+
+       return r535_fifo_ectx_size(fifo);
+}
+
+static void
+r535_fifo_dtor(struct nvkm_fifo *fifo)
+{
+       kfree(fifo->func);
+}
+
+int
+r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
+{
+       struct nvkm_fifo_func *rm;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_fifo_dtor;
+       rm->runl_ctor = r535_fifo_runl_ctor;
+       rm->runl = &r535_runl;
+       rm->cgrp = hw->cgrp;
+       rm->cgrp.func = &r535_cgrp;
+       rm->chan = hw->chan;
+       rm->chan.func = &r535_chan;
+       rm->nonstall = &ga100_fifo_nonstall;
+       rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
+
+       return nvkm_fifo_new_(rm, device, type, inst, pfifo);
+}
index 5421321f8e85f3a7e94cae5a76b869ad0d33641c..19e6772ead11fc78627eae42640dec3d55ece407 100644 (file)
@@ -18,6 +18,7 @@ struct nvkm_engn {
                bool (*mmu_fault_triggered)(struct nvkm_engn *);
                int (*ctor)(struct nvkm_engn *, struct nvkm_vctx *);
                void (*bind)(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *);
+               int (*ctor2)(struct nvkm_engn *, struct nvkm_vctx *, struct nvkm_chan *);
                int (*ramht_add)(struct nvkm_engn *, struct nvkm_object *, struct nvkm_chan *);
                void (*ramht_del)(struct nvkm_chan *, int hash);
        } *func;
@@ -28,6 +29,11 @@ struct nvkm_engn {
 
        int fault;
 
+       struct {
+               u32 desc;
+               u32 size;
+       } rm;
+
        struct list_head head;
 };
 
index ea9e151dbb488286511f3272f1a3de22d18339f4..1d39a6840a404d1409758db65d935b1c3a1cad72 100644 (file)
@@ -25,6 +25,7 @@
 #include "runl.h"
 
 #include <core/memory.h>
+#include <subdev/gsp.h>
 #include <subdev/mc.h>
 #include <subdev/vfn.h>
 
@@ -282,5 +283,8 @@ int
 tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fifo **pfifo)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fifo_new(&tu102_fifo, device, type, inst, pfifo);
+
        return nvkm_fifo_new_(&tu102_fifo, device, type, inst, pfifo);
 }
index 04140e0110beb0f5c6a85ca6ecb7c1b891f00476..9e56bcc166ed54b25d14a0c77fad6d53dc41292b 100644 (file)
@@ -317,6 +317,15 @@ nvkm_uchan = {
        .uevent = nvkm_uchan_uevent,
 };
 
+struct nvkm_chan *
+nvkm_uchan_chan(struct nvkm_object *object)
+{
+       if (WARN_ON(object->func != &nvkm_uchan))
+               return NULL;
+
+       return nvkm_uchan(object)->chan;
+}
+
 int
 nvkm_uchan_new(struct nvkm_fifo *fifo, struct nvkm_cgrp *cgrp, const struct nvkm_oclass *oclass,
               void *argv, u32 argc, struct nvkm_object **pobject)
index b5418f05ccd8b46c7664fd449b51f90ff3f9eb25..1555f8c40b4f3d93b6cb9453759211df5704b438 100644 (file)
@@ -41,6 +41,9 @@ nvkm-y += nvkm/engine/gr/gp10b.o
 nvkm-y += nvkm/engine/gr/gv100.o
 nvkm-y += nvkm/engine/gr/tu102.o
 nvkm-y += nvkm/engine/gr/ga102.o
+nvkm-y += nvkm/engine/gr/ad102.o
+
+nvkm-y += nvkm/engine/gr/r535.o
 
 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c
new file mode 100644 (file)
index 0000000..7bfa624
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+ad102_gr = {
+       .sclass = {
+               { -1, -1, FERMI_TWOD_A },
+               { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+               { -1, -1, ADA_A },
+               { -1, -1, ADA_COMPUTE_A },
+               {}
+       }
+};
+
+int
+ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_gr_new(&ad102_gr, device, type, inst, pgr);
+
+       return -ENODEV;
+}
index 0096ad401b15321d2b535107cc09ab2f542b9f0e..f5e68f09df768d4486fb1909e40e8a2dcdf1024d 100644 (file)
@@ -160,7 +160,11 @@ static int
 nvkm_gr_init(struct nvkm_engine *engine)
 {
        struct nvkm_gr *gr = nvkm_gr(engine);
-       return gr->func->init(gr);
+
+       if (gr->func->init)
+               return gr->func->init(gr);
+
+       return 0;
 }
 
 static int
index 00cd70abad67e2cb601916035666f1934f3090db..d285c597aff9c4a0d208aa956d9d45d126d440e4 100644 (file)
@@ -23,6 +23,7 @@
 #include "ctxgf100.h"
 
 #include <core/firmware.h>
+#include <subdev/gsp.h>
 #include <subdev/acr.h>
 #include <subdev/timer.h>
 #include <subdev/vfn.h>
@@ -350,5 +351,8 @@ ga102_gr_fwif[] = {
 int
 ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_gr_new(&ga102_gr, device, type, inst, pgr);
+
        return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
 }
index 54f686ba39ac2fff3cdbed960539c053e8ac9154..b0e0c93050345d73ac57d6cd0e9e4297f81b6b8f 100644 (file)
@@ -445,4 +445,6 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
 
 int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
                  struct nvkm_gr **);
+int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+               struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
new file mode 100644 (file)
index 0000000..f4bed3e
--- /dev/null
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+#include <engine/fifo/priv.h>
+
+#include <nvif/if900d.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
+
+#define r535_gr(p) container_of((p), struct r535_gr, base)
+
+#define R515_GR_MAX_CTXBUFS 9
+
+struct r535_gr {
+       struct nvkm_gr base;
+
+       struct {
+               u16 bufferId;
+               u32 size;
+               u8  page;
+               u8  align;
+               bool global;
+               bool init;
+               bool ro;
+       } ctxbuf[R515_GR_MAX_CTXBUFS];
+       int ctxbuf_nr;
+
+       struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr_chan {
+       struct nvkm_object object;
+       struct r535_gr *gr;
+
+       struct nvkm_vmm *vmm;
+       struct nvkm_chan *chan;
+
+       struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+       struct nvkm_vma    *vma[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_gr_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_gr_obj = {
+       .dtor = r535_gr_obj_dtor,
+};
+
+static int
+r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
+       struct r535_gr_obj *obj;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
+                                &obj->rm);
+}
+
+static void *
+r535_gr_chan_dtor(struct nvkm_object *object)
+{
+       struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
+       struct r535_gr *gr = grc->gr;
+
+       for (int i = 0; i < gr->ctxbuf_nr; i++) {
+               nvkm_vmm_put(grc->vmm, &grc->vma[i]);
+               nvkm_memory_unref(&grc->mem[i]);
+       }
+
+       nvkm_vmm_unref(&grc->vmm);
+       return grc;
+}
+
+static const struct nvkm_object_func
+r535_gr_chan = {
+       .dtor = r535_gr_chan_dtor,
+};
+
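+/* Promote GR context buffers to RM for a channel.  With "golden" set, the global
+ * buffers are allocated too, so RM can initialize the golden context image.
+ */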
+static int
+r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
+                   struct nvkm_memory **pmem, struct nvkm_vma **pvma,
+                   struct nvkm_gsp_object *chan)
+{
+       struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
+                                   NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       ctrl->engineType = 1;
+       ctrl->hChanClient = vmm->rm.client.object.handle;
+       ctrl->hObject = chan->handle;
+
+       for (int i = 0; i < gr->ctxbuf_nr; i++) {
+               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
+                       &ctrl->promoteEntry[ctrl->entryCount];
+               const bool alloc = golden || !gr->ctxbuf[i].global;
+               int ret;
+
+               entry->bufferId = gr->ctxbuf[i].bufferId;
+               entry->bInitialize = gr->ctxbuf[i].init && alloc;
+
+               if (alloc) {
+                       ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
+                                             NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
+                                             gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
+                                             gr->ctxbuf[i].init, &pmem[i]);
+                       if (WARN_ON(ret))
+                               return ret;
+
+                       if (gr->ctxbuf[i].bufferId ==
+                                       NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
+                               entry->bNonmapped = 1;
+               } else {
+                       if (gr->ctxbuf[i].bufferId ==
+                               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
+                               continue;
+
+                       pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
+               }
+
+               if (!entry->bNonmapped) {
+                       struct gf100_vmm_map_v0 args = {
+                               .priv = 1,
+                               .ro   = gr->ctxbuf[i].ro,
+                       };
+
+                       mutex_lock(&vmm->mutex.vmm);
+                       ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
+                                                 nvkm_memory_size(pmem[i]), &pvma[i]);
+                       mutex_unlock(&vmm->mutex.vmm);
+                       if (ret)
+                               return ret;
+
+                       ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
+                       if (ret)
+                               return ret;
+
+                       entry->gpuVirtAddr = pvma[i]->addr;
+               }
+
+               if (entry->bInitialize) {
+                       entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
+                       entry->size = gr->ctxbuf[i].size;
+                       entry->physAttr = 4;
+               }
+
+               nvkm_debug(subdev,
+                          "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
+                          entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
+                          entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
+
+               ctrl->entryCount++;
+       }
+
+       return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
+}
+
+static int
+r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
+                struct nvkm_object **pobject)
+{
+       struct r535_gr *gr = r535_gr(base);
+       struct r535_gr_chan *grc;
+       int ret;
+
+       if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
+       grc->gr = gr;
+       grc->vmm = nvkm_vmm_ref(chan->vmm);
+       grc->chan = chan;
+       *pobject = &grc->object;
+
+       ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
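+/* Report GPC/TPC counts as read back from GSP-RM. */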
+static u64
+r535_gr_units(struct nvkm_gr *gr)
+{
+       struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
+
+       return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
+}
+
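+/* One-time GR setup: allocate a scratch channel, promote a full set of context
+ * buffers to RM, then instantiate a 3D class to trigger golden context init.
+ */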
+static int
+r535_gr_oneinit(struct nvkm_gr *base)
+{
+       NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+       struct r535_gr *gr = container_of(base, typeof(*gr), base);
+       struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_gsp *gsp = device->gsp;
+       struct nvkm_mmu *mmu = device->mmu;
+       struct {
+               struct nvkm_memory *inst;
+               struct nvkm_vmm *vmm;
+               struct nvkm_gsp_object chan;
+               struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+       } golden = {};
+       int ret;
+
+       /* Allocate a channel to use for golden context init. */
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
+       if (ret)
+               goto done;
+
+       ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
+       if (ret)
+               goto done;
+
+       ret = mmu->func->promote_vmm(golden.vmm);
+       if (ret)
+               goto done;
+
+       {
+               NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+               args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
+                                            device->fifo->func->chan.user.oclass,
+                                            sizeof(*args), &golden.chan);
+               if (IS_ERR(args)) {
+                       ret = PTR_ERR(args);
+                       goto done;
+               }
+
+               args->gpFifoOffset = 0;
+               args->gpFifoEntries = 0x1000 / 8;
+               args->flags =
+                       NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
+                       NVDEF(NVOS04, FLAGS, VPR, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
+                       NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
+                       NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
+                       NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
+                       NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
+                       NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
+                       NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
+                       NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
+                       NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
+                       NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+               args->hVASpace = golden.vmm->rm.object.handle;
+               args->engineType = 1;
+               args->instanceMem.base = nvkm_memory_addr(golden.inst);
+               args->instanceMem.size = 0x1000;
+               args->instanceMem.addressSpace = 2;
+               args->instanceMem.cacheAttrib = 1;
+               args->ramfcMem.base = nvkm_memory_addr(golden.inst);
+               args->ramfcMem.size = 0x200;
+               args->ramfcMem.addressSpace = 2;
+               args->ramfcMem.cacheAttrib = 1;
+               args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
+               args->userdMem.size = 0x200;
+               args->userdMem.addressSpace = 2;
+               args->userdMem.cacheAttrib = 1;
+               args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
+               args->mthdbufMem.size = 0x5000;
+               args->mthdbufMem.addressSpace = 2;
+               args->mthdbufMem.cacheAttrib = 1;
+               args->internalFlags =
+                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
+                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
+                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+               ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
+               if (ret)
+                       goto done;
+       }
+
+       /* Fetch context buffer info from RM and allocate each of them here to use
+        * during golden context init (or later as a global context buffer).
+        *
+        * Also build the information that'll be used to create channel contexts.
+        */
+       info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                  NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+                                  sizeof(*info));
+       if (WARN_ON(IS_ERR(info))) {
+               ret = PTR_ERR(info);
+               goto done;
+       }
+
+       for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
+               static const struct {
+                       u32     id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
+                       u32     id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
+                       bool global;
+                       bool   init;
+                       bool     ro;
+               } map[] = {
+#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
+                       .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
+                       .global = (G), .init = (I), .ro = (R) }
+#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
+                       /*                                       global   init     ro */
+                       _A(           GRAPHICS,             MAIN, false,  true, false),
+                       _B(                                PATCH, false,  true, false),
+                       _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB,  true, false, false),
+                       _B(                             PAGEPOOL,  true, false, false),
+                       _B(                         ATTRIBUTE_CB,  true, false, false),
+                       _B(                        RTV_CB_GLOBAL,  true, false, false),
+                       _B(                           FECS_EVENT,  true,  true, false),
+                       _B(                      PRIV_ACCESS_MAP,  true,  true,  true),
+#undef _B
+#undef _A
+               };
+               u32 size = info->engineContextBuffersInfo[0].engine[i].size;
+               u8 align, page;
+               int id;
+
+               for (id = 0; id < ARRAY_SIZE(map); id++) {
+                       if (map[id].id0 == i)
+                               break;
+               }
+
+               nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
+                          size, (id < ARRAY_SIZE(map)) ? "*" : "");
+               if (id >= ARRAY_SIZE(map))
+                       continue;
+
+               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
+                       size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
+
+               if      (size >= 1 << 21) page = 21;
+               else if (size >= 1 << 16) page = 16;
+               else                      page = 12;
+
+               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
+                       align = order_base_2(size);
+               else
+                       align = page;
+
+               if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+                       continue;
+
+               gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
+               gr->ctxbuf[gr->ctxbuf_nr].size     = size;
+               gr->ctxbuf[gr->ctxbuf_nr].page     = page;
+               gr->ctxbuf[gr->ctxbuf_nr].align    = align;
+               gr->ctxbuf[gr->ctxbuf_nr].global   = map[id].global;
+               gr->ctxbuf[gr->ctxbuf_nr].init     = map[id].init;
+               gr->ctxbuf[gr->ctxbuf_nr].ro       = map[id].ro;
+               gr->ctxbuf_nr++;
+
+               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
+                       if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+                               continue;
+
+                       gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
+                       gr->ctxbuf[gr->ctxbuf_nr].bufferId =
+                               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
+                       gr->ctxbuf_nr++;
+               }
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+
+       /* Promote golden context to RM. */
+       ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
+       if (ret)
+               goto done;
+
+       /* Allocate 3D class on channel to trigger golden context init in RM. */
+       {
+               int i;
+
+               for (i = 0; gr->base.func->sclass[i].ctor; i++) {
+                       if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
+                               struct nvkm_gsp_object threed;
+
+                               ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
+                                                       gr->base.func->sclass[i].oclass, 0,
+                                                       &threed);
+                               if (ret)
+                                       goto done;
+
+                               nvkm_gsp_rm_free(&threed);
+                               break;
+                       }
+               }
+
+               if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+       }
+
+done:
+       nvkm_gsp_rm_free(&golden.chan);
+       for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
+               nvkm_vmm_put(golden.vmm, &golden.vma[i]);
+       nvkm_vmm_unref(&golden.vmm);
+       nvkm_memory_unref(&golden.inst);
+       return ret;
+}
+
+static void *
+r535_gr_dtor(struct nvkm_gr *base)
+{
+       struct r535_gr *gr = r535_gr(base);
+
+       while (gr->ctxbuf_nr)
+               nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
+
+       kfree(gr->base.func);
+       return gr;
+}
+
+int
+r535_gr_new(const struct gf100_gr_func *hw,
+           struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
+{
+       struct nvkm_gr_func *rm;
+       struct r535_gr *gr;
+       int nclass;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_gr_dtor;
+       rm->oneinit = r535_gr_oneinit;
+       rm->units = r535_gr_units;
+       rm->chan_new = r535_gr_chan_new;
+
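+       /* Mirror the HW implementation's class list, but construct objects via GSP-RM. */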
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_gr_obj_ctor;
+       }
+
+       if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
+               kfree(rm);
+               return -ENOMEM;
+       }
+
+       *pgr = &gr->base;
+
+       return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
+}
index a7775aa185415a35ac3851237b1cda4aedc835c8..b7a458e9040a3e26a6eeef0814289c99729318f1 100644 (file)
@@ -22,6 +22,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 void
@@ -216,5 +218,8 @@ tu102_gr_fwif[] = {
 int
 tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_gr_new(&tu102_gr, device, type, inst, pgr);
+
        return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
 }
index f05e79670d22736647b3995b0065907bbfd0573a..2b0e923cb75541f7fcad98eec9fc7cd7a95c9f3b 100644 (file)
@@ -1,4 +1,9 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/engine/nvdec/base.o
 nvkm-y += nvkm/engine/nvdec/gm107.o
+nvkm-y += nvkm/engine/nvdec/tu102.o
+nvkm-y += nvkm/engine/nvdec/ga100.o
 nvkm-y += nvkm/engine/nvdec/ga102.o
+nvkm-y += nvkm/engine/nvdec/ad102.o
+
+nvkm-y += nvkm/engine/nvdec/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c
new file mode 100644 (file)
index 0000000..d72b3aa
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_nvdec = {
+       .sclass = {
+               { -1, -1, NVC9B0_VIDEO_DECODER },
+               {}
+       }
+};
+
+int
+ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvdec **pnvdec)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec);
+
+       return -ENODEV;
+}
index 1f6e3b32ba163cfc70c8e21acbb1cdb58c3d94e8..7d1c6791ae8265721177179521c5b0982bc71188 100644 (file)
@@ -33,6 +33,7 @@ nvkm_nvdec_dtor(struct nvkm_engine *engine)
 static const struct nvkm_engine_func
 nvkm_nvdec = {
        .dtor = nvkm_nvdec_dtor,
+       .sclass = { {} },
 };
 
 int
@@ -58,4 +59,4 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
 
        return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
                                nvdec->engine.subdev.name, addr, &nvdec->falcon);
-};
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c
new file mode 100644 (file)
index 0000000..9329342
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga100_nvdec = {
+       .sclass = {
+               { -1, -1, NVC6B0_VIDEO_DECODER },
+               {}
+       }
+};
+
+int
+ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvdec **pnvdec)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec);
+
+       return -ENODEV;
+}
index 37d8c3c0f3abc08bb2f442d3c664bc624827dee8..022a9c824304372c4ceb0af1db31af806fd4acd1 100644 (file)
  */
 #include "priv.h"
 
-#include <subdev/mc.h>
-#include <subdev/timer.h>
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga102_nvdec_gsp = {
+       .sclass = {
+               { -1, -1, NVC7B0_VIDEO_DECODER },
+               {}
+       }
+};
 
 static const struct nvkm_falcon_func
 ga102_nvdec_flcn = {
@@ -57,5 +66,8 @@ int
 ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                struct nvkm_nvdec **pnvdec)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec);
+
        return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec);
 }
index 564f7e8960a2016d1af1cade0bbf16156c15f862..51c9d0e68ee41acb6879fd80a19d8aee64de8432 100644 (file)
@@ -44,7 +44,7 @@ gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver,
        return 0;
 }
 
-static const struct nvkm_nvdec_fwif
+const struct nvkm_nvdec_fwif
 gm107_nvdec_fwif[] = {
        { -1, gm107_nvdec_nofw, &gm107_nvdec },
        {}
index 61e1f7aaa50933ae2ef32bd699fad679bd5c03fe..f506ae83bfd73f6358102c4e7bc62815c87cbdb6 100644 (file)
@@ -5,6 +5,8 @@
 
 struct nvkm_nvdec_func {
        const struct nvkm_falcon_func *flcn;
+
+       struct nvkm_sclass sclass[];
 };
 
 struct nvkm_nvdec_fwif {
@@ -14,6 +16,11 @@ struct nvkm_nvdec_fwif {
        const struct nvkm_nvdec_func *func;
 };
 
+extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[];
+
 int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
                    enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **);
+
+int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *,
+                  enum nvkm_subdev_type, int, struct nvkm_nvdec **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
new file mode 100644 (file)
index 0000000..75a24f3
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvdec_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvdec_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvdec_obj = {
+       .dtor = r535_nvdec_obj_dtor,
+};
+
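+/* Allocate the video decoder class via GSP-RM, targeting this engine instance. */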
+static int
+r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_nvdec_obj *obj;
+       NV_BSP_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+       args->engineInstance = oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvdec_dtor(struct nvkm_engine *engine)
+{
+       struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
+
+       kfree(nvdec->engine.func);
+       return nvdec;
+}
+
+int
+r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+              enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
+{
+       struct nvkm_engine_func *rm;
+       int nclass;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_nvdec_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_nvdec_obj_ctor;
+       }
+
+       if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
+               kfree(rm);
+               return -ENOMEM;
+       }
+
+       return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
new file mode 100644 (file)
index 0000000..808c8e0
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu102_nvdec = {
+       .sclass = {
+               { -1, -1, NVC4B0_VIDEO_DECODER },
+               {}
+       }
+};
+
+int
+tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvdec **pnvdec)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec);
+
+       return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec);
+}
index 75bf4436bf3fe1fc039c43e0064dc82262ac2bc0..2c1495b730f3b4fc4ad4a034a97706c8bb3f481d 100644 (file)
@@ -1,3 +1,8 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/engine/nvenc/base.o
 nvkm-y += nvkm/engine/nvenc/gm107.o
+nvkm-y += nvkm/engine/nvenc/tu102.o
+nvkm-y += nvkm/engine/nvenc/ga102.o
+nvkm-y += nvkm/engine/nvenc/ad102.o
+
+nvkm-y += nvkm/engine/nvenc/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c
new file mode 100644 (file)
index 0000000..1b4619f
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_nvenc = {
+       .sclass = {
+               { -1, -1, NVC9B7_VIDEO_ENCODER },
+               {}
+       }
+};
+
+int
+ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvenc **pnvenc)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc);
+
+       return -ENODEV;
+}
index cf5dcfda7b2538fbcbaafe0095292bf272ab8d95..d45dbb42a0dbf02446d2ad4a0ab27791bed5cedd 100644 (file)
@@ -34,6 +34,7 @@ nvkm_nvenc_dtor(struct nvkm_engine *engine)
 static const struct nvkm_engine_func
 nvkm_nvenc = {
        .dtor = nvkm_nvenc_dtor,
+       .sclass = { {} },
 };
 
 int
@@ -59,4 +60,4 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
 
        return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
                                nvenc->engine.subdev.name, 0, &nvenc->falcon);
-};
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
new file mode 100644 (file)
index 0000000..6463ab8
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga102_nvenc = {
+       .sclass = {
+               { -1, -1, NVC7B7_VIDEO_ENCODER },
+               {}
+       }
+};
+
+int
+ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvenc **pnvenc)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc);
+
+       return -ENODEV;
+}
index ad27d8b97569686186ebadca0073b5841d6a61be..922abb647ad3504f9283ca7cbfdf582da909c31d 100644 (file)
@@ -38,7 +38,7 @@ gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver,
        return 0;
 }
 
-static const struct nvkm_nvenc_fwif
+const struct nvkm_nvenc_fwif
 gm107_nvenc_fwif[] = {
        { -1, gm107_nvenc_nofw, &gm107_nvenc },
        {}
index 4130a2bfbb4f79b060eae00eb552f26a26a058d9..7917affc6505a8ecf965d475dc8f5fecedb45bbe 100644 (file)
@@ -14,6 +14,11 @@ struct nvkm_nvenc_fwif {
        const struct nvkm_nvenc_func *func;
 };
 
+extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[];
+
 int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
                    int, struct nvkm_nvenc **pnvenc);
+
+int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *,
+                  enum nvkm_subdev_type, int, struct nvkm_nvenc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
new file mode 100644 (file)
index 0000000..c8a2a91
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvenc_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvenc_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvenc_obj = {
+       .dtor = r535_nvenc_obj_dtor,
+};
+
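+/* Allocate the video encoder class via GSP-RM, targeting this engine instance. */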
+static int
+r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_nvenc_obj *obj;
+       NV_MSENC_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+       args->engineInstance = oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvenc_dtor(struct nvkm_engine *engine)
+{
+       struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
+
+       kfree(nvenc->engine.func);
+       return nvenc;
+}
+
+int
+r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+              enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
+{
+       struct nvkm_engine_func *rm;
+       int nclass;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_nvenc_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_nvenc_obj_ctor;
+       }
+
+       if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) {
+               kfree(rm);
+               return -ENOMEM;
+       }
+
+       return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
new file mode 100644 (file)
index 0000000..9338644
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu102_nvenc = {
+       .sclass = {
+               { -1, -1, NVC4B7_VIDEO_ENCODER },
+               {}
+       }
+};
+
+int
+tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvenc **pnvenc)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc);
+
+       return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
new file mode 100644 (file)
index 0000000..1408f66
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/engine/nvjpg/ga100.o
+nvkm-y += nvkm/engine/nvjpg/ad102.o
+
+nvkm-y += nvkm/engine/nvjpg/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
new file mode 100644 (file)
index 0000000..62705dc
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_nvjpg = {
+       .sclass = {
+               { -1, -1, NVC9D1_VIDEO_NVJPG },
+               {}
+       }
+};
+
+int
+ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
new file mode 100644 (file)
index 0000000..f550eb0
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga100_nvjpg = {
+       .sclass = {
+               { -1, -1, NVC4D1_VIDEO_NVJPG },
+               {}
+       }
+};
+
+int
+ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
new file mode 100644 (file)
index 0000000..1e80cf7
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVJPG_PRIV_H__
+#define __NVKM_NVJPG_PRIV_H__
+#include <engine/nvjpg.h>
+
+int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *,
+                  enum nvkm_subdev_type, int, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
new file mode 100644 (file)
index 0000000..1babddc
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvjpg_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvjpg_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvjpg_obj = {
+       .dtor = r535_nvjpg_obj_dtor,
+};
+
+static int
+r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                   struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_nvjpg_obj *obj;
+       NV_NVJPG_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+       args->engineInstance = oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvjpg_dtor(struct nvkm_engine *engine)
+{
+       kfree(engine->func);
+       return engine;
+}
+
+int
+r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+              enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+       struct nvkm_engine_func *rm;
+       int nclass, ret;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_nvjpg_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_nvjpg_obj_ctor;
+       }
+
+       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
new file mode 100644 (file)
index 0000000..99f1713
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/engine/ofa/ga100.o
+nvkm-y += nvkm/engine/ofa/ga102.o
+nvkm-y += nvkm/engine/ofa/ad102.o
+
+nvkm-y += nvkm/engine/ofa/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c
new file mode 100644 (file)
index 0000000..7ac87ef
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_ofa = {
+       .sclass = {
+               { -1, -1, NVC9FA_VIDEO_OFA },
+               {}
+       }
+};
+
+int
+ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ofa_new(&ad102_ofa, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
new file mode 100644 (file)
index 0000000..ef474f6
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga100_ofa = {
+       .sclass = {
+               { -1, -1, NVC6FA_VIDEO_OFA },
+               {}
+       }
+};
+
+int
+ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ofa_new(&ga100_ofa, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
new file mode 100644 (file)
index 0000000..bea2555
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga102_ofa = {
+       .sclass = {
+               { -1, -1, NVC7FA_VIDEO_OFA },
+               {}
+       }
+};
+
+int
+ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ofa_new(&ga102_ofa, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
new file mode 100644 (file)
index 0000000..caf29e6
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_OFA_PRIV_H__
+#define __NVKM_OFA_PRIV_H__
+#include <engine/ofa.h>
+
+int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *,
+                enum nvkm_subdev_type, int, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
new file mode 100644 (file)
index 0000000..438dc69
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_ofa_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_ofa_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_ofa_obj = {
+       .dtor = r535_ofa_obj_dtor,
+};
+
+static int
+r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_ofa_obj *obj;
+       NV_OFA_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_ofa_dtor(struct nvkm_engine *engine)
+{
+       kfree(engine->func);
+       return engine;
+}
+
+int
+r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+            enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+       struct nvkm_engine_func *rm;
+       int nclass, ret;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_ofa_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_ofa_obj_ctor;
+       }
+
+       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 19feadb1f67b4a3acdd333193178d9f7ba43d7a4..b43b7e5e2733e4a5a4997b3e5e832f8dbc197345 100644 (file)
@@ -4,3 +4,5 @@ nvkm-y += nvkm/engine/sec2/gp102.o
 nvkm-y += nvkm/engine/sec2/gp108.o
 nvkm-y += nvkm/engine/sec2/tu102.o
 nvkm-y += nvkm/engine/sec2/ga102.o
+
+nvkm-y += nvkm/engine/sec2/r535.o
index 945abb8156d72657ad48807c93d64b3e6b1c855d..54be7596b046f925d1a7427c5eff4e729efbba24 100644 (file)
@@ -21,6 +21,7 @@
  */
 #include "priv.h"
 #include <subdev/acr.h>
+#include <subdev/gsp.h>
 #include <subdev/vfn.h>
 
 #include <nvfw/flcn.h>
@@ -193,5 +194,10 @@ ga102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
        /* TOP info wasn't updated on Turing to reflect the PRI
         * address change for some reason.  We override it here.
         */
-       return nvkm_sec2_new_(ga102_sec2_fwif, device, type, inst, 0x840000, psec2);
+       const u32 addr = 0x840000;
+
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_sec2_new(&ga102_sec2, device, type, inst, addr, psec2);
+
+       return nvkm_sec2_new_(ga102_sec2_fwif, device, type, inst, addr, psec2);
 }
index 172d2705c199234484939762ce0e80752bf527d8..e158a40a4f09a6510c1dd49e2376d3e2d8b5af11 100644 (file)
@@ -4,6 +4,9 @@
 #include <engine/sec2.h>
 struct nvkm_acr_lsfw;
 
+int r535_sec2_new(const struct nvkm_sec2_func *,
+                 struct nvkm_device *, enum nvkm_subdev_type, int, u32 addr, struct nvkm_sec2 **);
+
 struct nvkm_sec2_func {
        const struct nvkm_falcon_func *flcn;
        u8 unit_unload;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c
new file mode 100644 (file)
index 0000000..83a6bad
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void *
+r535_sec2_dtor(struct nvkm_engine *engine)
+{
+       struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+
+       nvkm_falcon_dtor(&sec2->falcon);
+       return sec2;
+}
+
+static const struct nvkm_engine_func
+r535_sec2 = {
+       .dtor = r535_sec2_dtor,
+};
+
+int
+r535_sec2_new(const struct nvkm_sec2_func *func, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_sec2 **psec2)
+{
+       struct nvkm_sec2 *sec2;
+       int ret;
+
+       if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
+               return -ENOMEM;
+
+       ret = nvkm_engine_ctor(&r535_sec2, device, type, inst, true, &sec2->engine);
+       if (ret)
+               return ret;
+
+       return nvkm_falcon_ctor(func->flcn, &sec2->engine.subdev, sec2->engine.subdev.name,
+                               addr, &sec2->falcon);
+}
index 0afc4b2fa5294f917d12e5248a22dd7838f058ec..20452046d7d1ffc3f50a6eff70ccf91b4c1bfaa5 100644 (file)
@@ -21,6 +21,7 @@
  */
 #include "priv.h"
 #include <subdev/acr.h>
+#include <subdev/gsp.h>
 
 #include <nvfw/sec2.h>
 
@@ -82,5 +83,10 @@ tu102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
        /* TOP info wasn't updated on Turing to reflect the PRI
         * address change for some reason.  We override it here.
         */
-       return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, 0x840000, psec2);
+       const u32 addr = 0x840000;
+
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_sec2_new(&tu102_sec2, device, type, inst, addr, psec2);
+
+       return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, addr, psec2);
 }
index 9ffe7b921ccb528d712efc94076d8655dd57fbcd..d6b0155644016392eefbc882a89a8ea70d8fd8cc 100644 (file)
@@ -8,5 +8,6 @@ nvkm-y += nvkm/falcon/v1.o
 
 nvkm-y += nvkm/falcon/gm200.o
 nvkm-y += nvkm/falcon/gp102.o
+nvkm-y += nvkm/falcon/tu102.o
 nvkm-y += nvkm/falcon/ga100.o
 nvkm-y += nvkm/falcon/ga102.o
index 235149f73a690d8dfef496c84e20774581e7e79e..3b790865aece9bc4d0afebb5ad61058971dc5b76 100644 (file)
 #include <subdev/timer.h>
 #include <subdev/top.h>
 
+void
+nvkm_falcon_intr_retrigger(struct nvkm_falcon *falcon)
+{
+       if (falcon->func->intr_retrigger)
+               falcon->func->intr_retrigger(falcon);
+}
+
+bool
+nvkm_falcon_riscv_active(struct nvkm_falcon *falcon)
+{
+       if (!falcon->func->riscv_active)
+               return false;
+
+       return falcon->func->riscv_active(falcon);
+}
+
 static const struct nvkm_falcon_func_dma *
 nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
 {
index 49fd32943916f31e00e8760652220144914bf7ec..5db94fb10afcc1cef590eca539f1cc6e7243b20d 100644 (file)
  */
 #include "priv.h"
 
+void
+ga100_flcn_intr_retrigger(struct nvkm_falcon *falcon)
+{
+       nvkm_falcon_wr32(falcon, 0x3e8, 0x00000001);
+}
+
 int
 ga100_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src)
 {
index 0ff450fe359081e25742ad8af67c2236961a14da..834afa45f2fdb821ee38244ad0a03a73b2baf37e 100644 (file)
 #include <subdev/mc.h>
 #include <subdev/timer.h>
 
+bool
+ga102_flcn_riscv_active(struct nvkm_falcon *falcon)
+{
+       return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x388) & 0x00000080) != 0;
+}
+
 static bool
 ga102_flcn_dma_done(struct nvkm_falcon *falcon)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c
new file mode 100644 (file)
index 0000000..3999182
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+bool
+tu102_flcn_riscv_active(struct nvkm_falcon *falcon)
+{
+       return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x240) & 0x00000001) != 0;
+}
index 45dcf493e972978c7368913ce26add89c5471cca..c7d38609bb7eb55d92287e571581ddb4fa9bd9ac 100644 (file)
@@ -20,6 +20,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include "priv.h"
+#include <subdev/gsp.h>
 
 #include <nvfw/acr.h>
 
@@ -322,5 +323,8 @@ int
 ga102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_acr **pacr)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_acr_new_(ga102_acr_fwif, device, type, inst, pacr);
 }
index c22d551c0078104d8374f162faa0c1f900c7b55b..565e9a070b23f38c884b677edc0c2e95e6f1c473 100644 (file)
@@ -201,5 +201,8 @@ int
 tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_acr **pacr)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr);
 }
index 8faee3317a74fbb13fc411c8266275d495ccc49a..9754c6872543cdd2c0e02509c7a17be02992ff42 100644 (file)
@@ -7,3 +7,5 @@ nvkm-y += nvkm/subdev/bar/gk20a.o
 nvkm-y += nvkm/subdev/bar/gm107.o
 nvkm-y += nvkm/subdev/bar/gm20b.o
 nvkm-y += nvkm/subdev/bar/tu102.o
+
+nvkm-y += nvkm/subdev/bar/r535.o
index d017a1b5e5dd55f23ce0ef7d752727b07ac86ab1..91bc53be97ffc228c243e675c039b8c53bdb8aab 100644 (file)
@@ -93,8 +93,16 @@ static int
 nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
 {
        struct nvkm_bar *bar = nvkm_bar(subdev);
+
+       if (!subdev->use.enabled)
+               return 0;
+
        if (bar->func->bar1.fini)
                bar->func->bar1.fini(bar);
+
+       if (!suspend) /* Handled by instmem. */
+               nvkm_bar_bar2_fini(subdev->device);
+
        return 0;
 }
 
@@ -120,7 +128,7 @@ static void *
 nvkm_bar_dtor(struct nvkm_subdev *subdev)
 {
        struct nvkm_bar *bar = nvkm_bar(subdev);
-       nvkm_bar_bar2_fini(subdev->device);
+
        return bar->func->dtor(bar);
 }
 
index daebfc991c76e6b8e3f1a004a7e364807b2dfac5..d0168e0b78fb745b3d7ff66e14c1b63f2c1d9ec0 100644 (file)
@@ -4,6 +4,9 @@
 #define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
 #include <subdev/bar.h>
 
+int r535_bar_new_(const struct nvkm_bar_func *,
+                 struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+
 void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
                   enum nvkm_subdev_type, int, struct nvkm_bar *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
new file mode 100644 (file)
index 0000000..4135690
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/mm.h>
+#include <subdev/fb.h>
+#include <subdev/gsp.h>
+#include <subdev/instmem.h>
+#include <subdev/mmu/vmm.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+static void
+r535_bar_flush(struct nvkm_bar *bar)
+{
+       ioread32_native(bar->flushBAR2);
+}
+
+static void
+r535_bar_bar2_wait(struct nvkm_bar *base)
+{
+}
+
+static int
+r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
+{
+       rpc_update_bar_pde_v15_00 *rpc;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc));
+       if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+               return -EIO;
+
+       rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
+       rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
+       rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+
+       return nvkm_gsp_rpc_wr(gsp, rpc, true);
+}
+
+static void
+r535_bar_bar2_fini(struct nvkm_bar *bar)
+{
+       struct nvkm_gsp *gsp = bar->subdev.device->gsp;
+
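+       /* Revert to the physical-mode flush page and unbind the BAR2 page directory. */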
+       bar->flushBAR2 = bar->flushBAR2PhysMode;
+       nvkm_done(bar->flushFBZero);
+
+       WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
+}
+
+static void
+r535_bar_bar2_init(struct nvkm_bar *bar)
+{
+       struct nvkm_device *device = bar->subdev.device;
+       struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
+       struct nvkm_gsp *gsp = device->gsp;
+
+       WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
+       vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
+
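+       /* Map the first page of VRAM through BAR2 for use as a flush target. */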
+       if (!bar->flushFBZero) {
+               struct nvkm_memory *fbZero;
+               int ret;
+
+               ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero);
+               if (ret == 0) {
+                       ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero);
+                       nvkm_memory_unref(&fbZero);
+               }
+               WARN_ON(ret);
+       }
+
+       bar->bar2 = true;
+       bar->flushBAR2 = nvkm_kmap(bar->flushFBZero);
+       WARN_ON(!bar->flushBAR2);
+}
+
+static void
+r535_bar_bar1_wait(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_fini(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_init(struct nvkm_bar *bar)
+{
+       struct nvkm_device *device = bar->subdev.device;
+       struct nvkm_gsp *gsp = device->gsp;
+       struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm;
+       struct nvkm_memory *pd3;
+       int ret;
+
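+       /* Replace the BAR1 VMM's page directory with the one allocated by GSP-RM. */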
+       ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3);
+       if (WARN_ON(ret))
+               return;
+
+       nvkm_memory_unref(&vmm->pd->pt[0]->memory);
+
+       ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory);
+       nvkm_memory_unref(&pd3);
+       if (WARN_ON(ret))
+               return;
+
+       vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory);
+}
+
+static void *
+r535_bar_dtor(struct nvkm_bar *bar)
+{
+       void *data = gf100_bar_dtor(bar);
+
+       nvkm_memory_unref(&bar->flushFBZero);
+
+       if (bar->flushBAR2PhysMode)
+               iounmap(bar->flushBAR2PhysMode);
+
+       kfree(bar->func);
+       return data;
+}
+
+int
+r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
+{
+       struct nvkm_bar_func *rm;
+       struct nvkm_bar *bar;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_bar_dtor;
+       rm->oneinit = hw->oneinit;
+       rm->bar1.init = r535_bar_bar1_init;
+       rm->bar1.fini = r535_bar_bar1_fini;
+       rm->bar1.wait = r535_bar_bar1_wait;
+       rm->bar1.vmm = hw->bar1.vmm;
+       rm->bar2.init = r535_bar_bar2_init;
+       rm->bar2.fini = r535_bar_bar2_fini;
+       rm->bar2.wait = r535_bar_bar2_wait;
+       rm->bar2.vmm = hw->bar2.vmm;
+       rm->flush = r535_bar_flush;
+
+       ret = gf100_bar_new_(rm, device, type, inst, &bar);
+       *pbar = bar;
+       if (ret) {
+               if (!bar)
+                       kfree(rm);
+               return ret;
+       }
+
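+       /* Map the start of the BAR2 aperture in physical mode as an initial
+        * flush target; bar2.init() switches to a VRAM mapping once BAR2 is up.
+        */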
+       bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
+       if (!bar->flushBAR2PhysMode)
+               return -ENOMEM;
+
+       bar->flushBAR2 = bar->flushBAR2PhysMode;
+
+       gf100_bar(*pbar)->bar2_halve = true;
+       return 0;
+}
index c25ab407b85d9182ff26ba361ac23dadde8b87a0..b4196edad5b8d029d4b08ada0eaf9fa0855cacc1 100644 (file)
@@ -22,6 +22,7 @@
 #include "gf100.h"
 
 #include <core/memory.h>
+#include <subdev/gsp.h>
 #include <subdev/timer.h>
 
 static void
@@ -95,5 +96,8 @@ int
 tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_bar **pbar)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_bar_new_(&tu102_bar, device, type, inst, pbar);
+
        return gf100_bar_new_(&tu102_bar, device, type, inst, pbar);
 }
index 6c318e41bde042609a4de03e3b72152eb3644c22..91f486ee4c42d771f41da1a91a686d658e978a78 100644 (file)
@@ -46,6 +46,14 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
        return true;
 }
 
+void *
+nvbios_pointer(struct nvkm_bios *bios, u32 addr)
+{
+       if (likely(nvbios_addr(bios, &addr, 0)))
+               return &bios->data[addr];
+       return NULL;
+}
+
 u8
 nvbios_rd08(struct nvkm_bios *bios, u32 addr)
 {
index 80b5aaceeaad17f6dde872cdac8c641dec6ce182..8e1e0b057a0b42c61bfe40ffbf0b09fb72bd753e 100644 (file)
@@ -24,6 +24,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 gf100_bus_intr(struct nvkm_bus *bus)
 {
@@ -72,5 +74,8 @@ int
 gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_bus **pbus)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus);
 }
index d1abb64841dac7ad969e4ab1ff688c03c5060095..5f97bffca979aeec56009e7ec0b0ccf77bdad1ff 100644 (file)
@@ -16,3 +16,5 @@ nvkm-y += nvkm/subdev/devinit/gm200.o
 nvkm-y += nvkm/subdev/devinit/gv100.o
 nvkm-y += nvkm/subdev/devinit/tu102.o
 nvkm-y += nvkm/subdev/devinit/ga100.o
+
+nvkm-y += nvkm/subdev/devinit/r535.o
index 6b280b05c4ca07c9bedf81b76c26827bbf2f937f..5f0b12a1fc38769dc138923db28c779cd37e8151 100644 (file)
@@ -24,6 +24,7 @@
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/clk/pll.h>
+#include <subdev/gsp.h>
 
 static int
 ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
@@ -62,8 +63,19 @@ ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
        return ret;
 }
 
+static void
+ga100_devinit_disable(struct nvkm_devinit *init)
+{
+       struct nvkm_device *device = init->subdev.device;
+       u32 r820c04 = nvkm_rd32(device, 0x820c04);
+
+       if (r820c04 & 0x00000001)
+               nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
+}
+
 static const struct nvkm_devinit_func
 ga100_devinit = {
+       .disable = ga100_devinit_disable,
        .init = nv50_devinit_init,
        .post = tu102_devinit_post,
        .pll_set = ga100_devinit_pll_set,
@@ -73,5 +85,8 @@ int
 ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                  struct nvkm_devinit **pinit)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_devinit_new(&ga100_devinit, device, type, inst, pinit);
+
        return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit);
 }
index a648482d06e912e0543ad55a1899d6bb2c8278d1..06bbfdcc788cf5f5ca041ec9c849c9439f1c4ca9 100644 (file)
@@ -4,6 +4,9 @@
 #define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
 #include <subdev/devinit.h>
 
+int r535_devinit_new(const struct nvkm_devinit_func *,
+                    struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+
 struct nvkm_devinit_func {
        void *(*dtor)(struct nvkm_devinit *);
        void (*preinit)(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
new file mode 100644 (file)
index 0000000..666eb93
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+static void *
+r535_devinit_dtor(struct nvkm_devinit *devinit)
+{
+       kfree(devinit->func);
+       return devinit;
+}
+
+int
+r535_devinit_new(const struct nvkm_devinit_func *hw,
+                struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+                struct nvkm_devinit **pdevinit)
+{
+       struct nvkm_devinit_func *rm;
+       int ret;
+
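+       /* Device init is handled by GSP-RM; only the HW post/disable hooks are kept. */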
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_devinit_dtor;
+       rm->post = hw->post;
+       rm->disable = hw->disable;
+
+       ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 40997ad1d101c59ac9fbd0d460a163534099ed38..f406b1525a4affa66f5dde71cba8604e3e8f4d3d 100644 (file)
@@ -24,6 +24,7 @@
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/clk/pll.h>
+#include <subdev/gsp.h>
 
 static int
 tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
@@ -100,5 +101,8 @@ int
 tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                  struct nvkm_devinit **pinit)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_devinit_new(&tu102_devinit, device, type, inst, pinit);
+
        return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit);
 }
index 967efaddae281261656577d8b188e96d8319a435..5390417a58b5d526e5874ac9c6007790170c3e44 100644 (file)
@@ -22,6 +22,7 @@
 #include "priv.h"
 
 #include <core/memory.h>
+#include <subdev/gsp.h>
 #include <subdev/mc.h>
 #include <subdev/mmu.h>
 #include <subdev/vfn.h>
@@ -175,7 +176,12 @@ int
 tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                struct nvkm_fault **pfault)
 {
-       int ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
+       int ret;
+
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
+       ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
        if (ret)
                return ret;
 
index 394c305e759ad8f008e2e0fa784a03f9aa9851c6..d1611ad3bf81c5155ac60046e989ade88e5f954f 100644 (file)
@@ -36,6 +36,8 @@ nvkm-y += nvkm/subdev/fb/tu102.o
 nvkm-y += nvkm/subdev/fb/ga100.o
 nvkm-y += nvkm/subdev/fb/ga102.o
 
+nvkm-y += nvkm/subdev/fb/r535.o
+
 nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
 nvkm-y += nvkm/subdev/fb/ramnv10.o
index 12037fd4fdf27fd708c345700b7a297448fae4e9..e9e7c1d5c4c427f404037cea934904c46e19522e 100644 (file)
@@ -22,6 +22,8 @@
 #include "gf100.h"
 #include "ram.h"
 
+#include <subdev/gsp.h>
+
 static const struct nvkm_fb_func
 ga100_fb = {
        .dtor = gf100_fb_dtor,
@@ -38,5 +40,8 @@ ga100_fb = {
 int
 ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fb_new(&ga100_fb, device, type, inst, pfb);
+
        return gf100_fb_new_(&ga100_fb, device, type, inst, pfb);
 }
index 76f6877b54c6f239cca61ebaef114d1b882c7e64..25f82b372bcab23e5528b0c34330adec0baa5e45 100644 (file)
@@ -22,6 +22,7 @@
 #include "gf100.h"
 #include "ram.h"
 
+#include <subdev/gsp.h>
 #include <engine/nvdec.h>
 
 static u64
@@ -59,6 +60,9 @@ ga102_fb = {
 int
 ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fb_new(&ga102_fb, device, type, inst, pfb);
+
        return gf100_fb_new_(&ga102_fb, device, type, inst, pfb);
 }
 
index 77d6a8c108298c78f4c6082ca4c2a2c65fa247b4..35c55dfba23d94725959324d30c93b960f36909a 100644 (file)
@@ -6,6 +6,9 @@
 #include <subdev/therm.h>
 struct nvkm_bios;
 
+int r535_fb_new(const struct nvkm_fb_func *,
+               struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+
 struct nvkm_fb_func {
        void *(*dtor)(struct nvkm_fb *);
        u32 (*tags)(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c
new file mode 100644 (file)
index 0000000..d325150
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "ram.h"
+
+#include <subdev/gsp.h>
+
+static const struct nvkm_ram_func
+r535_fb_ram = {
+};
+
+static int
+r535_fb_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+       struct nvkm_gsp *gsp = fb->subdev.device->gsp;
+       struct nvkm_ram *ram;
+       int ret;
+
+       if (!(ram = *pram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+               return -ENOMEM;
+
+       ram->func = &r535_fb_ram;
+       ram->fb = fb;
+       ram->type = NVKM_RAM_TYPE_UNKNOWN; /* TODO: pull this from GSP. */
+       ram->size = gsp->fb.size;
+       ram->stolen = false;
+       mutex_init(&ram->mutex);
+
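+       /* Register the usable VRAM regions reported by GSP-RM with the allocator. */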
+       for (int i = 0; i < gsp->fb.region_nr; i++) {
+               ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+                                  gsp->fb.region[i].addr >> NVKM_RAM_MM_SHIFT,
+                                  gsp->fb.region[i].size >> NVKM_RAM_MM_SHIFT,
+                                  1);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void *
+r535_fb_dtor(struct nvkm_fb *fb)
+{
+       kfree(fb->func);
+       return fb;
+}
+
+int
+r535_fb_new(const struct nvkm_fb_func *hw,
+           struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+       struct nvkm_fb_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_fb_dtor;
+       rm->sysmem.flush_page_init = hw->sysmem.flush_page_init;
+       rm->vidmem.size = hw->vidmem.size;
+       rm->ram_new = r535_fb_ram_new;
+
+       ret = nvkm_fb_new_(rm, device, type, inst, pfb);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 5c34416cb637951a4bc420d75f145813172028d6..c826980bf70ee1c67ef0e58905e3bba55860d0dc 100644 (file)
@@ -88,12 +88,20 @@ nvkm_vram_dtor(struct nvkm_memory *memory)
        struct nvkm_vram *vram = nvkm_vram(memory);
        struct nvkm_mm_node *next = vram->mn;
        struct nvkm_mm_node *node;
-       mutex_lock(&vram->ram->mutex);
-       while ((node = next)) {
-               next = node->next;
-               nvkm_mm_free(&vram->ram->vram, &node);
+
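+       /* Nodes allocated by nvkm_ram_wrap() aren't linked into the MM and
+        * are simply freed.
+        */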
+       if (next) {
+               if (likely(next->nl_entry.next)) {
+                       mutex_lock(&vram->ram->mutex);
+                       while ((node = next)) {
+                               next = node->next;
+                               nvkm_mm_free(&vram->ram->vram, &node);
+                       }
+                       mutex_unlock(&vram->ram->mutex);
+               } else {
+                       kfree(vram->mn);
+               }
        }
-       mutex_unlock(&vram->ram->mutex);
+
        return vram;
 }
 
@@ -108,6 +116,34 @@ nvkm_vram = {
        .kmap = nvkm_vram_kmap,
 };
 
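+/* Wrap an existing range of VRAM in an nvkm_memory object without
+ * allocating it from the VRAM allocator.
+ */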
+int
+nvkm_ram_wrap(struct nvkm_device *device, u64 addr, u64 size,
+             struct nvkm_memory **pmemory)
+{
+       struct nvkm_ram *ram;
+       struct nvkm_vram *vram;
+
+       if (!device->fb || !(ram = device->fb->ram))
+               return -ENODEV;
+
+       if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_memory_ctor(&nvkm_vram, &vram->memory);
+       vram->ram = ram;
+       vram->page = NVKM_RAM_MM_SHIFT;
+       *pmemory = &vram->memory;
+
+       vram->mn = kzalloc(sizeof(*vram->mn), GFP_KERNEL);
+       if (!vram->mn)
+               return -ENOMEM;
+
+       vram->mn->offset = addr >> NVKM_RAM_MM_SHIFT;
+       vram->mn->length = size >> NVKM_RAM_MM_SHIFT;
+       return 0;
+}
+
 int
 nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
             bool contig, bool back, struct nvkm_memory **pmemory)
index bcc23d4c8115d1d6b44c0dc1776ce5bdaeb3bf6f..f7d2a749ce3fe9dd6174a7b5784bab231aea3830 100644 (file)
@@ -22,6 +22,8 @@
 #include "gf100.h"
 #include "ram.h"
 
+#include <subdev/gsp.h>
+
 bool
 tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
 {
@@ -46,6 +48,9 @@ tu102_fb = {
 int
 tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fb_new(&tu102_fb, device, type, inst, pfb);
+
        return gf100_fb_new_(&tu102_fb, device, type, inst, pfb);
 }
 
index 7dc99492f536e7c90f5d16fc3c1bcd3c77ecd6b9..d621edbdff9d9ec90bd8656a58b46c4625a5dddb 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static u32
 gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
@@ -39,5 +41,8 @@ int
 gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fuse **pfuse)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse);
 }
index 4a96f926b66df2758c40b1ac24317b60422389d8..4dbffae21ddcd1f58f5c28544e9df4c784556f24 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
 {
@@ -115,5 +117,8 @@ int
 ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_gpio **pgpio)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio);
 }
index c0e4cdb45520be5b87d3887f2e13b7d47f8fe4cf..5f7063d5579bba2f5451db25f564276c8791d946 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
@@ -71,5 +73,8 @@ int
 gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_gpio **pgpio)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio);
 }
index 7f61a1ed158b98148d77180662854a6ff7dc988d..16bf2f1bb78014235e1349a0d2ad404a8750b94e 100644 (file)
@@ -1,4 +1,12 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/subdev/gsp/base.o
+nvkm-y += nvkm/subdev/gsp/fwsec.o
+
 nvkm-y += nvkm/subdev/gsp/gv100.o
+nvkm-y += nvkm/subdev/gsp/tu102.o
+nvkm-y += nvkm/subdev/gsp/tu116.o
+nvkm-y += nvkm/subdev/gsp/ga100.o
 nvkm-y += nvkm/subdev/gsp/ga102.o
+nvkm-y += nvkm/subdev/gsp/ad102.o
+
+nvkm-y += nvkm/subdev/gsp/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
new file mode 100644 (file)
index 0000000..c849c62
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+ad102_gsp_r535_113_01 = {
+       .flcn = &ga102_gsp_flcn,
+       .fwsec = &ga102_gsp_fwsec,
+
+       .sig_section = ".fwsignature_ad10x",
+
+       .wpr_heap.os_carveout_size = 20 << 20,
+       .wpr_heap.base_size = 8 << 20,
+       .wpr_heap.min_size = 84 << 20,
+
+       .booter.ctor = ga102_gsp_booter_ctor,
+
+       .dtor = r535_gsp_dtor,
+       .oneinit = tu102_gsp_oneinit,
+       .init = r535_gsp_init,
+       .fini = r535_gsp_fini,
+       .reset = ga102_gsp_reset,
+
+       .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+ad102_gsps[] = {
+       { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true },
+       {}
+};
+
+int
+ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_gsp **pgsp)
+{
+       return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp);
+}
index 591ac95c26699c2a67e7c8506401c493c23b43e0..04bceaa28a197d93d85db77098e9f8330c63cff0 100644 (file)
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include "priv.h"
-#include <core/falcon.h>
-#include <core/firmware.h>
-#include <subdev/acr.h>
-#include <subdev/top.h>
+
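+/* Look up the GSP-RM-assigned non-stalling interrupt vector for an engine instance. */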
+int
+nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
+{
+       for (int i = 0; i < gsp->intr_nr; i++) {
+               if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
+                       if (gsp->intr[i].nonstall != ~0)
+                               return gsp->intr[i].nonstall;
+
+                       return -EINVAL;
+               }
+       }
+
+       return -ENOENT;
+}
+
+int
+nvkm_gsp_intr_stall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
+{
+       for (int i = 0; i < gsp->intr_nr; i++) {
+               if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
+                       if (gsp->intr[i].stall != ~0)
+                               return gsp->intr[i].stall;
+
+                       return -EINVAL;
+               }
+       }
+
+       return -ENOENT;
+}
+
+static int
+nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+       struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+       if (!gsp->func->fini)
+               return 0;
+
+       return gsp->func->fini(gsp, suspend);
+}
+
+static int
+nvkm_gsp_init(struct nvkm_subdev *subdev)
+{
+       struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+       if (!gsp->func->init)
+               return 0;
+
+       return gsp->func->init(gsp);
+}
+
+static int
+nvkm_gsp_oneinit(struct nvkm_subdev *subdev)
+{
+       struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+       if (!gsp->func->oneinit)
+               return 0;
+
+       return gsp->func->oneinit(gsp);
+}
 
 static void *
 nvkm_gsp_dtor(struct nvkm_subdev *subdev)
 {
        struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+       if (gsp->func && gsp->func->dtor)
+               gsp->func->dtor(gsp);
+
        nvkm_falcon_dtor(&gsp->falcon);
        return gsp;
 }
@@ -36,6 +99,9 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev)
 static const struct nvkm_subdev_func
 nvkm_gsp = {
        .dtor = nvkm_gsp_dtor,
+       .oneinit = nvkm_gsp_oneinit,
+       .init = nvkm_gsp_init,
+       .fini = nvkm_gsp_fini,
 };
 
 int
@@ -54,6 +120,8 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
                return PTR_ERR(fwif);
 
        gsp->func = fwif->func;
+       gsp->rm = gsp->func->rm;
 
-       return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon);
+       return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000,
+                               &gsp->falcon);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
new file mode 100644 (file)
index 0000000..330d72b
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pmu.h>
+
+#include <nvfw/fw.h>
+
+union nvfw_falcon_appif_hdr {
+       struct nvfw_falcon_appif_hdr_v1 {
+               u8 ver;
+               u8 hdr;
+               u8 len;
+               u8 cnt;
+       } v1;
+};
+
+union nvfw_falcon_appif {
+       struct nvfw_falcon_appif_v1 {
+#define NVFW_FALCON_APPIF_ID_DMEMMAPPER 0x00000004
+               u32 id;
+               u32 dmem_base;
+       } v1;
+};
+
+union nvfw_falcon_appif_dmemmapper {
+       struct {
+               u32 signature;
+               u16 version;
+               u16 size;
+               u32 cmd_in_buffer_offset;
+               u32 cmd_in_buffer_size;
+               u32 cmd_out_buffer_offset;
+               u32 cmd_out_buffer_size;
+               u32 nvf_img_data_buffer_offset;
+               u32 nvf_img_data_buffer_size;
+               u32 printf_buffer_hdr;
+               u32 ucode_build_time_stamp;
+               u32 ucode_signature;
+#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS 0x00000015
+#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB   0x00000019
+               u32 init_cmd;
+               u32 ucode_feature;
+               u32 ucode_cmd_mask0;
+               u32 ucode_cmd_mask1;
+               u32 multi_tgt_tbl;
+       } v3;
+};
+
+struct nvfw_fwsec_frts_cmd {
+       struct {
+           u32 ver;
+           u32 hdr;
+           u64 addr;
+           u32 size;
+           u32 flags;
+       } read_vbios;
+       struct {
+           u32 ver;
+           u32 hdr;
+           u32 addr;
+           u32 size;
+#define NVFW_FRTS_CMD_REGION_TYPE_FB 0x00000002
+           u32 type;
+       } frts_region;
+};
+
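+/*
+ * Walk the application interface table in the FWSEC image's DMEM and patch
+ * the DMEMMAPPER entry with the requested init command (and, for FRTS, the
+ * target region taken from gsp->fb.wpr2).
+ */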
+static int
+nvkm_gsp_fwsec_patch(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, u32 if_offset, u32 init_cmd)
+{
+       union nvfw_falcon_appif_hdr *hdr = (void *)(fw->fw.img + fw->dmem_base_img + if_offset);
+       const u8 *dmem = fw->fw.img + fw->dmem_base_img;
+       int i;
+
+       if (WARN_ON(hdr->v1.ver != 1))
+               return -EINVAL;
+
+       for (i = 0; i < hdr->v1.cnt; i++) {
+               union nvfw_falcon_appif *app = (void *)((u8 *)hdr + hdr->v1.hdr + i * hdr->v1.len);
+               union nvfw_falcon_appif_dmemmapper *dmemmap;
+               struct nvfw_fwsec_frts_cmd *frtscmd;
+
+               if (app->v1.id != NVFW_FALCON_APPIF_ID_DMEMMAPPER)
+                       continue;
+
+               dmemmap = (void *)(dmem + app->v1.dmem_base);
+               dmemmap->v3.init_cmd = init_cmd;
+
+               frtscmd = (void *)(dmem + dmemmap->v3.cmd_in_buffer_offset);
+
+               frtscmd->read_vbios.ver = 1;
+               frtscmd->read_vbios.hdr = sizeof(frtscmd->read_vbios);
+               frtscmd->read_vbios.addr = 0;
+               frtscmd->read_vbios.size = 0;
+               frtscmd->read_vbios.flags = 2;
+
+               if (init_cmd == NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS) {
+                       frtscmd->frts_region.ver = 1;
+                       frtscmd->frts_region.hdr = sizeof(frtscmd->frts_region);
+                       frtscmd->frts_region.addr = gsp->fb.wpr2.frts.addr >> 12;
+                       frtscmd->frts_region.size = gsp->fb.wpr2.frts.size >> 12;
+                       frtscmd->frts_region.type = NVFW_FRTS_CMD_REGION_TYPE_FB;
+               }
+
+               break;
+       }
+
+       if (WARN_ON(i == hdr->v1.cnt))
+               return -EINVAL;
+
+       return 0;
+}
+
+union nvfw_falcon_ucode_desc {
+       struct nvkm_falcon_ucode_desc_v2 {
+               u32 Hdr;
+               u32 StoredSize;
+               u32 UncompressedSize;
+               u32 VirtualEntry;
+               u32 InterfaceOffset;
+               u32 IMEMPhysBase;
+               u32 IMEMLoadSize;
+               u32 IMEMVirtBase;
+               u32 IMEMSecBase;
+               u32 IMEMSecSize;
+               u32 DMEMOffset;
+               u32 DMEMPhysBase;
+               u32 DMEMLoadSize;
+               u32 altIMEMLoadSize;
+               u32 altDMEMLoadSize;
+       } v2;
+
+       struct nvkm_falcon_ucode_desc_v3 {
+               u32 Hdr;
+               u32 StoredSize;
+               u32 PKCDataOffset;
+               u32 InterfaceOffset;
+               u32 IMEMPhysBase;
+               u32 IMEMLoadSize;
+               u32 IMEMVirtBase;
+               u32 DMEMPhysBase;
+               u32 DMEMLoadSize;
+               u16 EngineIdMask;
+               u8  UcodeId;
+               u8  SignatureCount;
+               u16 SignatureVersions;
+               u16 Reserved;
+       } v3;
+};
+
+static int
+nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name,
+                 const struct nvkm_falcon_ucode_desc_v2 *desc, u32 size, u32 init_cmd,
+                 struct nvkm_falcon_fw *fw)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       const struct firmware *bl;
+       const struct nvfw_bin_hdr *hdr;
+       const struct nvfw_bl_desc *bld;
+       int ret;
+
+       /* Build ucode. */
+       ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, subdev->device, true,
+                                 (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize,
+                                 &gsp->falcon, fw);
+       if (WARN_ON(ret))
+               return ret;
+
+       fw->nmem_base_img = 0;
+       fw->nmem_base = desc->IMEMPhysBase;
+       fw->nmem_size = desc->IMEMLoadSize - desc->IMEMSecSize;
+
+       fw->imem_base_img = 0;
+       fw->imem_base = desc->IMEMSecBase;
+       fw->imem_size = desc->IMEMSecSize;
+
+       fw->dmem_base_img = desc->DMEMOffset;
+       fw->dmem_base = desc->DMEMPhysBase;
+       fw->dmem_size = desc->DMEMLoadSize;
+
+       /* Bootloader. */
+       ret = nvkm_firmware_get(subdev, "acr/bl", 0, &bl);
+       if (ret)
+               return ret;
+
+       hdr = nvfw_bin_hdr(subdev, bl->data);
+       bld = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
+
+       fw->boot_addr = bld->start_tag << 8;
+       fw->boot_size = bld->code_size;
+       fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL);
+       if (!fw->boot)
+               ret = -ENOMEM;
+
+       nvkm_firmware_put(bl);
+
+       /* Patch in interface data. */
+       return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
+}
+
+static int
+nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name,
+                 const struct nvkm_falcon_ucode_desc_v3 *desc, u32 size, u32 init_cmd,
+                 struct nvkm_falcon_fw *fw)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       struct nvkm_bios *bios = device->bios;
+       int ret;
+
+       /* Build ucode. */
+       ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, device, true,
+                                 (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize,
+                                 &gsp->falcon, fw);
+       if (WARN_ON(ret))
+               return ret;
+
+       fw->imem_base_img = 0;
+       fw->imem_base = desc->IMEMPhysBase;
+       fw->imem_size = desc->IMEMLoadSize;
+       fw->dmem_base_img = desc->IMEMLoadSize;
+       fw->dmem_base = desc->DMEMPhysBase;
+       fw->dmem_size = ALIGN(desc->DMEMLoadSize, 256);
+       fw->dmem_sign = desc->PKCDataOffset;
+       fw->boot_addr = 0;
+       fw->fuse_ver = desc->SignatureVersions;
+       fw->ucode_id = desc->UcodeId;
+       fw->engine_id = desc->EngineIdMask;
+
+       /* Patch in signature. */
+       ret = nvkm_falcon_fw_sign(fw, fw->dmem_base_img + desc->PKCDataOffset, 96 * 4,
+                                 nvbios_pointer(bios, 0), desc->SignatureCount,
+                                 (u8 *)desc + 0x2c - (u8 *)nvbios_pointer(bios, 0), 0, 0);
+       if (WARN_ON(ret))
+               return ret;
+
+       /* Patch in interface data. */
+       return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
+}
+
+static int
+nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_bios *bios = device->bios;
+       const union nvfw_falcon_ucode_desc *desc;
+       struct nvbios_pmuE flcn_ucode;
+       u8 idx, ver, hdr;
+       u32 data;
+       u16 size, vers;
+       struct nvkm_falcon_fw fw = {};
+       u32 mbox0 = 0;
+       int ret;
+
+       /* Lookup in VBIOS. */
+       for (idx = 0; (data = nvbios_pmuEp(bios, idx, &ver, &hdr, &flcn_ucode)); idx++) {
+               if (flcn_ucode.type == 0x85)
+                       break;
+       }
+
+       if (WARN_ON(!data))
+               return -EINVAL;
+
+       /* Determine version. */
+       desc = nvbios_pointer(bios, flcn_ucode.data);
+       if (WARN_ON(!(desc->v2.Hdr & 0x00000001)))
+               return -EINVAL;
+
+       size = (desc->v2.Hdr & 0xffff0000) >> 16;
+       vers = (desc->v2.Hdr & 0x0000ff00) >> 8;
+
+       switch (vers) {
+       case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break;
+       case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break;
+       default:
+               nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers);
+               return -EINVAL;
+       }
+
+       if (ret) {
+               nvkm_error(subdev, "%s(v%d): %d\n", name, vers, ret);
+               return ret;
+       }
+
+       /* Boot. */
+       ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0);
+       nvkm_falcon_fw_dtor(&fw);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
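+/*
+ * Execute FWSEC with the SB command and check the error code it leaves in
+ * the register at 0x00143c.
+ */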
+int
+nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       int ret;
+       u32 err;
+
+       ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
+       if (ret)
+               return ret;
+
+       /* Verify. */
+       err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff;
+       if (err) {
+               nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err);
+               return -EIO;
+       }
+
+       return 0;
+}
+
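+/*
+ * Execute FWSEC with the FRTS command to have it set up the WPR2 region,
+ * then check the error code at 0x001438 and log the resulting WPR2 bounds.
+ */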
+int
+nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       int ret;
+       u32 err, wpr2_lo, wpr2_hi;
+
+       ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS);
+       if (ret)
+               return ret;
+
+       /* Verify. */
+       err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16;
+       if (err) {
+               nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err);
+               return -EIO;
+       }
+
+       wpr2_lo = nvkm_rd32(device, 0x1fa824);
+       wpr2_hi = nvkm_rd32(device, 0x1fa828);
+       nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
new file mode 100644 (file)
index 0000000..223f68b
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+ga100_gsp_flcn = {
+       .disable = gm200_flcn_disable,
+       .enable = gm200_flcn_enable,
+       .addr2 = 0x1000,
+       .riscv_irqmask = 0x2b4,
+       .reset_eng = gp102_flcn_reset_eng,
+       .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
+       .bind_inst = gm200_flcn_bind_inst,
+       .bind_stat = gm200_flcn_bind_stat,
+       .bind_intr = true,
+       .imem_pio = &gm200_flcn_imem_pio,
+       .dmem_pio = &gm200_flcn_dmem_pio,
+       .riscv_active = tu102_flcn_riscv_active,
+       .intr_retrigger = ga100_flcn_intr_retrigger,
+};
+
+static const struct nvkm_gsp_func
+ga100_gsp_r535_113_01 = {
+       .flcn = &ga100_gsp_flcn,
+       .fwsec = &tu102_gsp_fwsec,
+
+       .sig_section = ".fwsignature_ga100",
+
+       .wpr_heap.base_size = 8 << 20,
+       .wpr_heap.min_size = 64 << 20,
+
+       .booter.ctor = tu102_gsp_booter_ctor,
+
+       .dtor = r535_gsp_dtor,
+       .oneinit = tu102_gsp_oneinit,
+       .init = r535_gsp_init,
+       .fini = r535_gsp_fini,
+       .reset = tu102_gsp_reset,
+
+       .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+ga100_gsps[] = {
+       {  0,  r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" },
+       { -1, gv100_gsp_nofw, &gv100_gsp },
+       {}
+};
+
+int
+ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_gsp **pgsp)
+{
+       return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp);
+}
index a3996ceca99552de6415056d4b821285918279be..4c4b4168a266bada5327b4e2d2ecfc0a79756175 100644 (file)
  */
 #include "priv.h"
 
-static const struct nvkm_falcon_func
+#include <nvfw/flcn.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+int
+ga102_gsp_reset(struct nvkm_gsp *gsp)
+{
+       int ret;
+
+       ret = gsp->falcon.func->reset_eng(&gsp->falcon);
+       if (ret)
+               return ret;
+
+       nvkm_falcon_mask(&gsp->falcon, 0x1668, 0x00000111, 0x00000111);
+       return 0;
+}
+
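+/*
+ * Build the GSP booter firmware from an HS (v2 header) image: patch in the
+ * production signature and record the IMEM/DMEM load layout and the
+ * fuse/engine/ucode IDs from the image metadata.
+ */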
+int
+ga102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
+                     struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       const struct nvkm_falcon_fw_func *func = &ga102_flcn_fw;
+       const struct nvfw_bin_hdr *hdr;
+       const struct nvfw_hs_header_v2 *hshdr;
+       const struct nvfw_hs_load_header_v2 *lhdr;
+       u32 loc, sig, cnt, *meta;
+       int ret;
+
+       hdr = nvfw_bin_hdr(subdev, blob->data);
+       hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
+       meta = (u32 *)(blob->data + hshdr->meta_data_offset);
+       loc = *(u32 *)(blob->data + hshdr->patch_loc);
+       sig = *(u32 *)(blob->data + hshdr->patch_sig);
+       cnt = *(u32 *)(blob->data + hshdr->num_sig);
+
+       ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
+                                 blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
+       if (ret)
+               goto done;
+
+       ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
+                                 cnt, hshdr->sig_prod_offset + sig, 0, 0);
+       if (ret)
+               goto done;
+
+       lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
+
+       fw->imem_base_img = lhdr->app[0].offset;
+       fw->imem_base = 0;
+       fw->imem_size = lhdr->app[0].size;
+
+       fw->dmem_base_img = lhdr->os_data_offset;
+       fw->dmem_base = 0;
+       fw->dmem_size = lhdr->os_data_size;
+       fw->dmem_sign = loc - lhdr->os_data_offset;
+
+       fw->boot_addr = lhdr->app[0].offset;
+
+       fw->fuse_ver = meta[0];
+       fw->engine_id = meta[1];
+       fw->ucode_id = meta[2];
+
+done:
+       if (ret)
+               nvkm_falcon_fw_dtor(fw);
+
+       return ret;
+}
+
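+/*
+ * Select which of the signatures embedded in the FWSEC image matches the
+ * fuse version currently programmed in HW, returning its index.
+ */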
+static int
+ga102_gsp_fwsec_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src)
+{
+       struct nvkm_falcon *falcon = fw->falcon;
+       struct nvkm_device *device = falcon->owner->device;
+       u32 sig_fuse_version = fw->fuse_ver;
+       u32 reg_fuse_version;
+       int idx = 0;
+
+       FLCN_DBG(falcon, "brom: %08x %08x", fw->engine_id, fw->ucode_id);
+       FLCN_DBG(falcon, "sig_fuse_version: %08x", sig_fuse_version);
+
+       if (fw->engine_id & 0x00000400) {
+               reg_fuse_version = nvkm_rd32(device, 0x8241c0 + (fw->ucode_id - 1) * 4);
+       } else {
+               WARN_ON(1);
+               return -ENOSYS;
+       }
+
+       FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
+       reg_fuse_version = BIT(fls(reg_fuse_version));
+       FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
+       if (!(reg_fuse_version & fw->fuse_ver))
+               return -EINVAL;
+
+       while (!(reg_fuse_version & sig_fuse_version & 1)) {
+               idx += (sig_fuse_version & 1);
+               reg_fuse_version >>= 1;
+               sig_fuse_version >>= 1;
+       }
+
+       return idx;
+}
+
+const struct nvkm_falcon_fw_func
+ga102_gsp_fwsec = {
+       .signature = ga102_gsp_fwsec_signature,
+       .reset = gm200_flcn_fw_reset,
+       .load = ga102_flcn_fw_load,
+       .boot = ga102_flcn_fw_boot,
+};
+
+const struct nvkm_falcon_func
 ga102_gsp_flcn = {
        .disable = gm200_flcn_disable,
        .enable = gm200_flcn_enable,
        .select = ga102_flcn_select,
        .addr2 = 0x1000,
+       .riscv_irqmask = 0x528,
        .reset_eng = gp102_flcn_reset_eng,
        .reset_prep = ga102_flcn_reset_prep,
        .reset_wait_mem_scrubbing = ga102_flcn_reset_wait_mem_scrubbing,
        .imem_dma = &ga102_flcn_dma,
        .dmem_dma = &ga102_flcn_dma,
+       .riscv_active = ga102_flcn_riscv_active,
+       .intr_retrigger = ga100_flcn_intr_retrigger,
 };
 
 static const struct nvkm_gsp_func
-ga102_gsp = {
+ga102_gsp_r535_113_01 = {
        .flcn = &ga102_gsp_flcn,
+       .fwsec = &ga102_gsp_fwsec,
+
+       .sig_section = ".fwsignature_ga10x",
+
+       .wpr_heap.os_carveout_size = 20 << 20,
+       .wpr_heap.base_size = 8 << 20,
+       .wpr_heap.min_size = 84 << 20,
+
+       .booter.ctor = ga102_gsp_booter_ctor,
+
+       .dtor = r535_gsp_dtor,
+       .oneinit = tu102_gsp_oneinit,
+       .init = r535_gsp_init,
+       .fini = r535_gsp_fini,
+       .reset = ga102_gsp_reset,
+
+       .rm = &r535_gsp_rm,
 };
 
-static int
-ga102_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
-{
-       return 0;
-}
+static const struct nvkm_gsp_func
+ga102_gsp = {
+       .flcn = &ga102_gsp_flcn,
+};
 
 static struct nvkm_gsp_fwif
 ga102_gsps[] = {
-       { -1, ga102_gsp_nofw, &ga102_gsp },
+       {  0,  r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" },
+       { -1, gv100_gsp_nofw, &ga102_gsp },
        {}
 };
 
index da6a809cd31731dca04ca2ea623c1be5415f6bbd..62d9289bcaa5f971cd5aef3a10ddb50808eaa0c7 100644 (file)
@@ -34,12 +34,12 @@ gv100_gsp_flcn = {
        .dmem_pio = &gm200_flcn_dmem_pio,
 };
 
-static const struct nvkm_gsp_func
+const struct nvkm_gsp_func
 gv100_gsp = {
        .flcn = &gv100_gsp_flcn,
 };
 
-static int
+int
 gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
 {
        return 0;
index 89749a40203c46b95f78eda42fa1d29745bed8fa..9f4a62375a278a079aaba528fd99db05589b8e02 100644 (file)
@@ -4,16 +4,67 @@
 #include <subdev/gsp.h>
 enum nvkm_acr_lsf_id;
 
-struct nvkm_gsp_func {
-       const struct nvkm_falcon_func *flcn;
-};
+int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
+int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
 
 struct nvkm_gsp_fwif {
        int version;
        int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
        const struct nvkm_gsp_func *func;
+       const char *ver;
+       bool enable;
 };
 
+int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+int  r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+struct nvkm_gsp_func {
+       const struct nvkm_falcon_func *flcn;
+       const struct nvkm_falcon_fw_func *fwsec;
+
+       char *sig_section;
+
+       struct {
+               u32 os_carveout_size;
+               u32 base_size;
+               u64 min_size;
+       } wpr_heap;
+
+       struct {
+               int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *,
+                           struct nvkm_falcon *, struct nvkm_falcon_fw *);
+       } booter;
+
+       void (*dtor)(struct nvkm_gsp *);
+       int (*oneinit)(struct nvkm_gsp *);
+       int (*init)(struct nvkm_gsp *);
+       int (*fini)(struct nvkm_gsp *, bool suspend);
+       int (*reset)(struct nvkm_gsp *);
+
+       const struct nvkm_gsp_rm *rm;
+};
+
+extern const struct nvkm_falcon_func tu102_gsp_flcn;
+extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
+int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
+                         struct nvkm_falcon *, struct nvkm_falcon_fw *);
+int tu102_gsp_oneinit(struct nvkm_gsp *);
+int tu102_gsp_reset(struct nvkm_gsp *);
+
+extern const struct nvkm_falcon_func ga102_gsp_flcn;
+extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec;
+int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
+                         struct nvkm_falcon *, struct nvkm_falcon_fw *);
+int ga102_gsp_reset(struct nvkm_gsp *);
+
+void r535_gsp_dtor(struct nvkm_gsp *);
+int r535_gsp_oneinit(struct nvkm_gsp *);
+int r535_gsp_init(struct nvkm_gsp *);
+int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
+extern const struct nvkm_gsp_rm r535_gsp_rm;
+
 int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
                  struct nvkm_gsp **);
+
+extern const struct nvkm_gsp_func gv100_gsp;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
new file mode 100644 (file)
index 0000000..e31f964
--- /dev/null
@@ -0,0 +1,2236 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/pci.h>
+#include <subdev/timer.h>
+#include <subdev/vfn.h>
+#include <engine/fifo/chan.h>
+#include <engine/sec2.h>
+
+#include <nvfw/fw.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
+#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
+#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
+#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+
+#include <linux/acpi.h>
+
+#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
+struct r535_gsp_msg {
+       u8 auth_tag_buffer[16];
+       u8 aad_buffer[16];
+       u32 checksum;
+       u32 sequence;
+       u32 elem_count;
+       u32 pad;
+       u8  data[];
+};
+
+#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+
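+/*
+ * Wait for enough pages to appear in the GSP message queue for a message of
+ * 'repc' bytes.  If 'prepc' is non-NULL, return a pointer into the queue
+ * (and the bytes available); otherwise copy the message out into a freshly
+ * allocated buffer, handling wrap-around, and advance the read pointer.
+ */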
+static void *
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
+{
+       struct r535_gsp_msg *mqe;
+       u32 size, rptr = *gsp->msgq.rptr;
+       int used;
+       u8 *msg;
+       u32 len;
+
+       size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE);
+       if (WARN_ON(!size || size >= gsp->msgq.cnt))
+               return ERR_PTR(-EINVAL);
+
+       do {
+               u32 wptr = *gsp->msgq.wptr;
+
+               used = wptr + gsp->msgq.cnt - rptr;
+               if (used >= gsp->msgq.cnt)
+                       used -= gsp->msgq.cnt;
+               if (used >= size)
+                       break;
+
+               usleep_range(1, 2);
+       } while (--(*ptime));
+
+       if (WARN_ON(!*ptime))
+               return ERR_PTR(-ETIMEDOUT);
+
+       mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000);
+
+       if (prepc) {
+               *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe);
+               return mqe->data;
+       }
+
+       msg = kvmalloc(repc, GFP_KERNEL);
+       if (!msg)
+               return ERR_PTR(-ENOMEM);
+
+       len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
+       len = min_t(u32, repc, len);
+       memcpy(msg, mqe->data, len);
+
+       rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
+       if (rptr == gsp->msgq.cnt)
+               rptr = 0;
+
+       repc -= len;
+
+       if (repc) {
+               mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
+               memcpy(msg + len, mqe, repc);
+
+               rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);
+       }
+
+       mb();
+       (*gsp->msgq.rptr) = rptr;
+       return msg;
+}
+
+static void *
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime)
+{
+       return r535_gsp_msgq_wait(gsp, repc, NULL, ptime);
+}
+
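+/*
+ * Fill in the queue element header, compute the XOR checksum over the whole
+ * command, copy it into the command queue ring (waiting for space and
+ * handling wrap-around), then advance the write pointer and ring the GSP
+ * doorbell (falcon register 0xc00).
+ */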
+static int
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
+{
+       struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data);
+       struct r535_gsp_msg *cqe;
+       u32 argc = cmd->checksum;
+       u64 *ptr = (void *)cmd;
+       u64 *end;
+       u64 csum = 0;
+       int free, time = 1000000;
+       u32 wptr, size;
+       u32 off = 0;
+
+       argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);
+
+       end = (u64 *)((char *)ptr + argc);
+       cmd->pad = 0;
+       cmd->checksum = 0;
+       cmd->sequence = gsp->cmdq.seq++;
+       cmd->elem_count = DIV_ROUND_UP(argc, 0x1000);
+
+       while (ptr < end)
+               csum ^= *ptr++;
+
+       cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+
+       wptr = *gsp->cmdq.wptr;
+       do {
+               do {
+                       free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
+                       if (free >= gsp->cmdq.cnt)
+                               free -= gsp->cmdq.cnt;
+                       if (free >= 1)
+                               break;
+
+                       usleep_range(1, 2);
+               } while(--time);
+
+               if (WARN_ON(!time)) {
+                       kvfree(cmd);
+                       return -ETIMEDOUT;
+               }
+
+               cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
+               size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
+               memcpy(cqe, (u8 *)cmd + off, size);
+
+               wptr += DIV_ROUND_UP(size, 0x1000);
+               if (wptr == gsp->cmdq.cnt)
+                       wptr = 0;
+
+               off  += size;
+               argc -= size;
+       } while(argc);
+
+       nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
+       wmb();
+       (*gsp->cmdq.wptr) = wptr;
+       mb();
+
+       nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
+
+       kvfree(cmd);
+       return 0;
+}
+
+static void *
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc)
+{
+       struct r535_gsp_msg *cmd;
+       u32 size = GSP_MSG_HDR_SIZE + argc;
+
+       size = ALIGN(size, GSP_MSG_MIN_SIZE);
+       cmd = kvzalloc(size, GFP_KERNEL);
+       if (!cmd)
+               return ERR_PTR(-ENOMEM);
+
+       cmd->checksum = argc;
+       return cmd->data;
+}
+
+struct nvfw_gsp_rpc {
+       u32 header_version;
+       u32 signature;
+       u32 length;
+       u32 function;
+       u32 rpc_result;
+       u32 rpc_result_private;
+       u32 sequence;
+       union {
+               u32 spare;
+               u32 cpuRmGfid;
+       };
+       u8  data[];
+};
+
+static void
+r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
+{
+       kvfree(msg);
+}
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
+{
+       if (gsp->subdev.debug >= lvl) {
+               nvkm_printk__(&gsp->subdev, lvl, info,
+                             "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
+                             msg->function, msg->length, msg->length - sizeof(*msg),
+                             msg->rpc_result, msg->rpc_result_private);
+               print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+                              msg->data, msg->length - sizeof(*msg), true);
+       }
+}
+
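+/*
+ * Receive messages from the GSP message queue.  A reply matching 'fn' is
+ * returned to the caller (after checking it carries at least 'repc' bytes of
+ * payload); any other message is dispatched to a registered notify handler,
+ * and the loop continues until the awaited reply arrives or the queue is
+ * drained.
+ */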
+static struct nvfw_gsp_rpc *
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvfw_gsp_rpc *msg;
+       int time = 4000000, i;
+       u32 size;
+
+retry:
+       msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time);
+       if (IS_ERR_OR_NULL(msg))
+               return msg;
+
+       msg = r535_gsp_msgq_recv(gsp, msg->length, &time);
+       if (IS_ERR_OR_NULL(msg))
+               return msg;
+
+       if (msg->rpc_result) {
+               r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
+               r535_gsp_msg_done(gsp, msg);
+               return ERR_PTR(-EINVAL);
+       }
+
+       r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE);
+
+       if (fn && msg->function == fn) {
+               if (repc) {
+                       if (msg->length < sizeof(*msg) + repc) {
+                               nvkm_error(subdev, "msg len %d < %zd\n",
+                                          msg->length, sizeof(*msg) + repc);
+                               r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
+                               r535_gsp_msg_done(gsp, msg);
+                               return ERR_PTR(-EIO);
+                       }
+
+                       return msg;
+               }
+
+               r535_gsp_msg_done(gsp, msg);
+               return NULL;
+       }
+
+       for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
+               struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
+
+               if (ntfy->fn == msg->function) {
+                       ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
+                       break;
+               }
+       }
+
+       if (i == gsp->msgq.ntfy_nr)
+               r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN);
+
+       r535_gsp_msg_done(gsp, msg);
+       if (fn)
+               goto retry;
+
+       if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+               goto retry;
+
+       return NULL;
+}
+
+static int
+r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
+{
+       int ret = 0;
+
+       mutex_lock(&gsp->msgq.mutex);
+       if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
+               ret = -ENOSPC;
+       } else {
+               gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
+               gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
+               gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
+               gsp->msgq.ntfy_nr++;
+       }
+       mutex_unlock(&gsp->msgq.mutex);
+       return ret;
+}
+
+static int
+r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
+{
+       void *repv;
+
+       mutex_lock(&gsp->cmdq.mutex);
+       repv = r535_gsp_msg_recv(gsp, fn, 0);
+       mutex_unlock(&gsp->cmdq.mutex);
+       if (IS_ERR(repv))
+               return PTR_ERR(repv);
+
+       return 0;
+}
+
+static void *
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+       struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
+       struct nvfw_gsp_rpc *msg;
+       u32 fn = rpc->function;
+       void *repv = NULL;
+       int ret;
+
+       if (gsp->subdev.debug >= NV_DBG_TRACE) {
+               nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
+                          rpc->length, rpc->length - sizeof(*rpc));
+               print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
+                              rpc->data, rpc->length - sizeof(*rpc), true);
+       }
+
+       ret = r535_gsp_cmdq_push(gsp, rpc);
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (wait) {
+               msg = r535_gsp_msg_recv(gsp, fn, repc);
+               if (!IS_ERR_OR_NULL(msg))
+                       repv = msg->data;
+               else
+                       repv = msg;
+       }
+
+       return repv;
+}
+
+static void
+r535_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+       struct nvkm_gsp_device *device = event->device;
+       struct nvkm_gsp_client *client = device->object.client;
+       struct nvkm_gsp *gsp = client->gsp;
+
+       mutex_lock(&gsp->client_id.mutex);
+       if (event->func) {
+               list_del(&event->head);
+               event->func = NULL;
+       }
+       mutex_unlock(&gsp->client_id.mutex);
+
+       nvkm_gsp_rm_free(&event->object);
+       event->device = NULL;
+}
+
+static int
+r535_gsp_device_event_get(struct nvkm_gsp_event *event)
+{
+       struct nvkm_gsp_device *device = event->device;
+       NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
+                                   NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->event = event->id;
+       ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
+       return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
+}
+
+static int
+r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+                          nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+       struct nvkm_gsp_client *client = device->object.client;
+       struct nvkm_gsp *gsp = client->gsp;
+       NV0005_ALLOC_PARAMETERS *args;
+       int ret;
+
+       args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
+                                    NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
+                                    &event->object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->hParentClient = client->object.handle;
+       args->hSrcResource = 0;
+       args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
+       args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
+       args->data = NULL;
+
+       ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
+       if (ret)
+               return ret;
+
+       event->device = device;
+       event->id = id;
+
+       ret = r535_gsp_device_event_get(event);
+       if (ret) {
+               nvkm_gsp_event_dtor(event);
+               return ret;
+       }
+
+       mutex_lock(&gsp->client_id.mutex);
+       event->func = func;
+       list_add(&event->head, &client->events);
+       mutex_unlock(&gsp->client_id.mutex);
+       return 0;
+}
+
+static void
+r535_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+       nvkm_gsp_rm_free(&device->subdevice);
+       nvkm_gsp_rm_free(&device->object);
+}
+
+static int
+r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
+{
+       NV2080_ALLOC_PARAMETERS *args;
+
+       return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
+                                &device->subdevice);
+}
+
+static int
+r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+       NV0080_ALLOC_PARAMETERS *args;
+       int ret;
+
+       args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
+                                    &device->object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->hClientShare = client->object.handle;
+
+       ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
+       if (ret)
+               return ret;
+
+       ret = r535_gsp_subdevice_ctor(device);
+       if (ret)
+               nvkm_gsp_rm_free(&device->object);
+
+       return ret;
+}
+
+static void
+r535_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+       struct nvkm_gsp *gsp = client->gsp;
+
+       nvkm_gsp_rm_free(&client->object);
+
+       mutex_lock(&gsp->client_id.mutex);
+       idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
+       mutex_unlock(&gsp->client_id.mutex);
+
+       client->gsp = NULL;
+}
+
+static int
+r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+       NV0000_ALLOC_PARAMETERS *args;
+       int ret;
+
+       mutex_lock(&gsp->client_id.mutex);
+       ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
+       mutex_unlock(&gsp->client_id.mutex);
+       if (ret < 0)
+               return ret;
+
+       client->gsp = gsp;
+       client->object.client = client;
+       INIT_LIST_HEAD(&client->events);
+
+       args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
+                                    &client->object);
+       if (IS_ERR(args)) {
+               r535_gsp_client_dtor(client);
+               return PTR_ERR(args);
+       }
+
+       args->hClient = client->object.handle;
+       args->processID = ~0;
+
+       ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
+       if (ret) {
+               r535_gsp_client_dtor(client);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
+r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
+{
+       struct nvkm_gsp_client *client = object->client;
+       struct nvkm_gsp *gsp = client->gsp;
+       rpc_free_v03_00 *rpc;
+
+       nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
+                  client->object.handle, object->handle);
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
+       if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+               return -EIO;
+
+       rpc->params.hRoot = client->object.handle;
+       rpc->params.hObjectParent = 0;
+       rpc->params.hObjectOld = object->handle;
+       return nvkm_gsp_rpc_wr(gsp, rpc, true);
+}
+
+static void
+r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
+{
+       rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+
+       nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+       rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+       struct nvkm_gsp *gsp = object->client->gsp;
+       void *ret;
+
+       rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc);
+       if (IS_ERR_OR_NULL(rpc))
+               return rpc;
+
+       if (rpc->status) {
+               nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
+               ret = ERR_PTR(-EINVAL);
+       } else {
+               ret = repc ? rpc->params : NULL;
+       }
+
+       if (IS_ERR_OR_NULL(ret))
+               nvkm_gsp_rpc_done(gsp, rpc);
+
+       return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc)
+{
+       struct nvkm_gsp_client *client = object->client;
+       struct nvkm_gsp *gsp = client->gsp;
+       rpc_gsp_rm_alloc_v03_00 *rpc;
+
+       nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n",
+                  client->object.handle, object->parent->handle, object->handle, oclass, argc);
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc);
+       if (IS_ERR(rpc))
+               return rpc;
+
+       rpc->hClient = client->object.handle;
+       rpc->hParent = object->parent->handle;
+       rpc->hObject = object->handle;
+       rpc->hClass = oclass;
+       rpc->status = 0;
+       rpc->paramsSize = argc;
+       return rpc->params;
+}
+
+static void
+r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
+{
+       rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+
+       nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+       rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+       struct nvkm_gsp *gsp = object->client->gsp;
+       void *ret;
+
+       rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
+       if (IS_ERR_OR_NULL(rpc))
+               return rpc;
+
+       if (rpc->status) {
+               nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+                          object->client->object.handle, object->handle, rpc->cmd, rpc->status);
+               ret = ERR_PTR(-EINVAL);
+       } else {
+               ret = repc ? rpc->params : NULL;
+       }
+
+       if (IS_ERR_OR_NULL(ret))
+               nvkm_gsp_rpc_done(gsp, rpc);
+
+       return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
+{
+       struct nvkm_gsp_client *client = object->client;
+       struct nvkm_gsp *gsp = client->gsp;
+       rpc_gsp_rm_control_v03_00 *rpc;
+
+       nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n",
+                  client->object.handle, object->handle, cmd, argc);
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc);
+       if (IS_ERR(rpc))
+               return rpc;
+
+       rpc->hClient    = client->object.handle;
+       rpc->hObject    = object->handle;
+       rpc->cmd        = cmd;
+       rpc->status     = 0;
+       rpc->paramsSize = argc;
+       return rpc->params;
+}
+
+static void
+r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+       struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
+
+       r535_gsp_msg_done(gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+{
+       struct nvfw_gsp_rpc *rpc;
+
+       rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64)));
+       if (!rpc)
+               return NULL;
+
+       rpc->header_version = 0x03000000;
+       rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
+       rpc->function = fn;
+       rpc->rpc_result = 0xffffffff;
+       rpc->rpc_result_private = 0xffffffff;
+       rpc->length = sizeof(*rpc) + argc;
+       return rpc->data;
+}
+
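+/*
+ * Send an RPC to GSP-RM.  RPCs larger than a single (16-page) queue message
+ * are split, with the remainder sent as CONTINUATION_RECORD RPCs, before
+ * optionally waiting for the reply.
+ */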
+static void *
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+       struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
+       struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data);
+       const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg);
+       const u32 max_rpc_size = max_msg_size - sizeof(*rpc);
+       u32 rpc_size = rpc->length - sizeof(*rpc);
+       void *repv;
+
+       mutex_lock(&gsp->cmdq.mutex);
+       if (rpc_size > max_rpc_size) {
+               const u32 fn = rpc->function;
+
+               /* Adjust length, and send initial RPC. */
+               rpc->length = sizeof(*rpc) + max_rpc_size;
+               cmd->checksum = rpc->length;
+
+               repv = r535_gsp_rpc_send(gsp, argv, false, 0);
+               if (IS_ERR(repv))
+                       goto done;
+
+               argv += max_rpc_size;
+               rpc_size -= max_rpc_size;
+
+               /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
+               while (rpc_size) {
+                       u32 size = min(rpc_size, max_rpc_size);
+                       void *next;
+
+                       next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
+                       if (IS_ERR(next)) {
+                               repv = next;
+                               goto done;
+                       }
+
+                       memcpy(next, argv, size);
+
+                       repv = r535_gsp_rpc_send(gsp, next, false, 0);
+                       if (IS_ERR(repv))
+                               goto done;
+
+                       argv += size;
+                       rpc_size -= size;
+               }
+
+               /* Wait for reply. */
+               if (wait) {
+                       rpc = r535_gsp_msg_recv(gsp, fn, repc);
+                       if (!IS_ERR_OR_NULL(rpc))
+                               repv = rpc->data;
+                       else
+                               repv = rpc;
+               } else {
+                       repv = NULL;
+               }
+       } else {
+               repv = r535_gsp_rpc_send(gsp, argv, wait, repc);
+       }
+
+done:
+       mutex_unlock(&gsp->cmdq.mutex);
+       return repv;
+}
+
+const struct nvkm_gsp_rm
+r535_gsp_rm = {
+       .rpc_get = r535_gsp_rpc_get,
+       .rpc_push = r535_gsp_rpc_push,
+       .rpc_done = r535_gsp_rpc_done,
+
+       .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
+       .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
+       .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,
+
+       .rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
+       .rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
+       .rm_alloc_done = r535_gsp_rpc_rm_alloc_done,
+
+       .rm_free = r535_gsp_rpc_rm_free,
+
+       .client_ctor = r535_gsp_client_ctor,
+       .client_dtor = r535_gsp_client_dtor,
+
+       .device_ctor = r535_gsp_device_ctor,
+       .device_dtor = r535_gsp_device_dtor,
+
+       .event_ctor = r535_gsp_device_event_ctor,
+       .event_dtor = r535_gsp_event_dtor,
+};
+
+static void
+r535_gsp_msgq_work(struct work_struct *work)
+{
+       struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);
+
+       mutex_lock(&gsp->cmdq.mutex);
+       if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+               r535_gsp_msg_recv(gsp, 0, 0);
+       mutex_unlock(&gsp->cmdq.mutex);
+}
+
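+/*
+ * GSP falcon interrupt handler.  The message-queue notification (bit 6) is
+ * acked and deferred to the msgq work item; any other asserted bits are
+ * logged and cleared.
+ */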
+static irqreturn_t
+r535_gsp_intr(struct nvkm_inth *inth)
+{
+       struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth);
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008);
+       u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 +
+                                                 gsp->falcon.func->riscv_irqmask);
+       u32 stat = intr & inte;
+
+       if (!stat) {
+               nvkm_debug(subdev, "inte %08x %08x\n", intr, inte);
+               return IRQ_NONE;
+       }
+
+       if (stat & 0x00000040) {
+               nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040);
+               schedule_work(&gsp->msgq.work);
+               stat &= ~0x00000040;
+       }
+
+       if (stat) {
+               nvkm_error(subdev, "intr %08x\n", stat);
+               nvkm_falcon_wr32(&gsp->falcon, 0x014, stat);
+               nvkm_falcon_wr32(&gsp->falcon, 0x004, stat);
+       }
+
+       nvkm_falcon_intr_retrigger(&gsp->falcon);
+       return IRQ_HANDLED;
+}
+
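+/*
+ * Query GSP-RM's kernel interrupt table and record the stall/non-stall
+ * vectors for the engines nvkm knows about, translating RM engine indices
+ * into nvkm subdev types and instances.
+ */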
+static int
+r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
+{
+       NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+       int ret = 0;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       for (unsigned i = 0; i < ctrl->tableLen; i++) {
+               enum nvkm_subdev_type type;
+               int inst;
+
+               nvkm_debug(&gsp->subdev,
+                          "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i,
+                          ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
+                          ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
+
+               switch (ctrl->table[i].engineIdx) {
+               case MC_ENGINE_IDX_GSP:
+                       type = NVKM_SUBDEV_GSP;
+                       inst = 0;
+                       break;
+               case MC_ENGINE_IDX_DISP:
+                       type = NVKM_ENGINE_DISP;
+                       inst = 0;
+                       break;
+               case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
+                       type = NVKM_ENGINE_CE;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
+                       break;
+               case MC_ENGINE_IDX_GR0:
+                       type = NVKM_ENGINE_GR;
+                       inst = 0;
+                       break;
+               case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+                       type = NVKM_ENGINE_NVDEC;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
+                       break;
+               case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
+                       type = NVKM_ENGINE_NVENC;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
+                       break;
+               case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+                       type = NVKM_ENGINE_NVJPG;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
+                       break;
+               case MC_ENGINE_IDX_OFA0:
+                       type = NVKM_ENGINE_OFA;
+                       inst = 0;
+                       break;
+               default:
+                       continue;
+               }
+
+               if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
+                       ret = -ENOSPC;
+                       break;
+               }
+
+               gsp->intr[gsp->intr_nr].type = type;
+               gsp->intr[gsp->intr_nr].inst = inst;
+               gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall;
+               gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall;
+               gsp->intr_nr++;
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       return ret;
+}
+
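+/*
+ * Fetch GSP-RM's static configuration: the internal client/device/subdevice
+ * handles, BAR1/BAR2 page directory addresses, usable framebuffer regions
+ * (and the size reserved above them), and GPC/TPC counts.
+ */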
+static int
+r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
+{
+       GspStaticConfigInfo *rpc;
+       int last_usable = -1;
+
+       rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       gsp->internal.client.object.client = &gsp->internal.client;
+       gsp->internal.client.object.parent = NULL;
+       gsp->internal.client.object.handle = rpc->hInternalClient;
+       gsp->internal.client.gsp = gsp;
+
+       gsp->internal.device.object.client = &gsp->internal.client;
+       gsp->internal.device.object.parent = &gsp->internal.client.object;
+       gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+       gsp->internal.device.subdevice.client = &gsp->internal.client;
+       gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+       gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+       gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+       gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+       for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
+               NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
+                       &rpc->fbRegionInfoParams.fbRegion[i];
+
+               nvkm_debug(&gsp->subdev, "fb region %d: "
+                          "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
+                          reg->base, reg->limit, reg->reserved, reg->performance,
+                          reg->supportCompressed, reg->supportISO, reg->bProtected);
+
+               if (!reg->reserved && !reg->bProtected) {
+                       if (reg->supportCompressed && reg->supportISO &&
+                           !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) {
+                               const u64 size = (reg->limit + 1) - reg->base;
+
+                               gsp->fb.region[gsp->fb.region_nr].addr = reg->base;
+                               gsp->fb.region[gsp->fb.region_nr].size = size;
+                               gsp->fb.region_nr++;
+                       }
+
+                       last_usable = i;
+               }
+       }
+
+       if (last_usable >= 0) {
+               u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
+
+               gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
+       }
+
+       for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
+               if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
+                       gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask);
+                       gsp->gr.gpcs++;
+               }
+       }
+
+       nvkm_gsp_rpc_done(gsp, rpc);
+       return 0;
+}
+
+static int
+r535_gsp_postinit(struct nvkm_gsp *gsp)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       int ret;
+
+       ret = r535_gsp_rpc_get_gsp_static_info(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);
+
+       ret = r535_gsp_intr_get_table(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst);
+       if (WARN_ON(ret < 0))
+               return ret;
+
+       ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev,
+                           r535_gsp_intr, &gsp->subdev.inth);
+       if (WARN_ON(ret))
+               return ret;
+
+       nvkm_inth_allow(&gsp->subdev.inth);
+       nvkm_wr32(device, 0x110004, 0x00000040);
+       return ret;
+}
+
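+/* Notify GSP-RM that the driver is unloading.  On suspend this is flagged as a
+ * PM transition to power level 3; on a normal unload no PM transition is
+ * indicated and level 0 is used.
+ */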
+static int
+r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
+{
+       rpc_unloading_guest_driver_v1F_07 *rpc;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc));
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       if (suspend) {
+               rpc->bInPMTransition = 1;
+               rpc->bGc6Entering = 0;
+               rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+       } else {
+               rpc->bInPMTransition = 0;
+               rpc->bGc6Entering = 0;
+               rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
+       }
+
+       return nvkm_gsp_rpc_wr(gsp, rpc, true);
+}
+
+/* Registry entries handed to GSP-RM; only dword (32-bit) values are supported. */
+struct nv_gsp_registry_entries {
+       const char *name;
+       u32 value;
+};
+
+static const struct nv_gsp_registry_entries r535_registry_entries[] = {
+       { "RMSecBusResetEnable", 1 },
+       { "RMForcePcieConfigSave", 1 },
+};
+#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
+
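+/* Send the driver's registry keys to GSP-RM.  The PACKED_REGISTRY_TABLE RPC is
+ * a table of entries followed by their NUL-terminated key strings, with each
+ * entry's nameOffset pointing at its string within the message.
+ */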
+static int
+r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
+{
+       PACKED_REGISTRY_TABLE *rpc;
+       char *strings;
+       int str_offset;
+       int i;
+       size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES;
+
+       /* add strings + null terminator */
+       for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)
+               rpc_size += strlen(r535_registry_entries[i].name) + 1;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size);
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       rpc->size = sizeof(*rpc);
+       rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
+
+       str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
+       strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
+       for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
+               int name_len = strlen(r535_registry_entries[i].name) + 1;
+
+               rpc->entries[i].nameOffset = str_offset;
+               rpc->entries[i].type = 1;
+               rpc->entries[i].data = r535_registry_entries[i].value;
+               rpc->entries[i].length = 4;
+               memcpy(strings, r535_registry_entries[i].name, name_len);
+               strings += name_len;
+               str_offset += name_len;
+       }
+
+       return nvkm_gsp_rpc_wr(gsp, rpc, false);
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+static void
+r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
+{
+       const guid_t NVOP_DSM_GUID =
+               GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
+                         0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
+       u64 NVOP_DSM_REV = 0x00000100;
+       union acpi_object argv4 = {
+               .buffer.type    = ACPI_TYPE_BUFFER,
+               .buffer.length  = 4,
+               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+       }, *obj;
+
+       caps->status = 0xffff;
+
+       if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a)))
+               return;
+
+       obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4);
+       if (!obj)
+               return;
+
+       printk(KERN_ERR "nvop: obj type %d\n", obj->type);
+       printk(KERN_ERR "nvop: obj len %d\n", obj->buffer.length);
+
+       if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+           WARN_ON(obj->buffer.length != 4))
+               return;
+
+       caps->status = 0;
+       caps->optimusCaps = *(u32 *)obj->buffer.pointer;
+       printk(KERN_ERR "nvop: caps %08x\n", caps->optimusCaps);
+
+       ACPI_FREE(obj);
+
+       kfree(argv4.buffer.pointer);
+}
+
+static void
+r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
+{
+       const guid_t JT_DSM_GUID =
+               GUID_INIT(0xCBECA351L, 0x067B, 0x4924,
+                         0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34);
+       u64 JT_DSM_REV = 0x00000103;
+       u32 caps;
+       union acpi_object argv4 = {
+               .buffer.type    = ACPI_TYPE_BUFFER,
+               .buffer.length  = sizeof(caps),
+               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+       }, *obj;
+
+       jt->status = 0xffff;
+
+       obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4);
+       if (!obj)
+               return;
+
+       printk(KERN_ERR "jt: obj type %d\n", obj->type);
+       printk(KERN_ERR "jt: obj len %d\n", obj->buffer.length);
+
+       if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+           WARN_ON(obj->buffer.length != 4))
+               return;
+
+       jt->status = 0;
+       jt->jtCaps = *(u32 *)obj->buffer.pointer;
+       jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
+       jt->bSBIOSCaps = 0;
+       printk(KERN_ERR "jt: caps %08x rev:%04x\n", jt->jtCaps, jt->jtRevId);
+
+       ACPI_FREE(obj);
+
+       kfree(argv4.buffer.pointer);
+}
+
+static void
+r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
+                                                MUX_METHOD_DATA_ELEMENT *part)
+{
+       acpi_handle iter = NULL, handle_mux = NULL;
+       acpi_status status;
+       unsigned long long value;
+
+       mode->status = 0xffff;
+       part->status = 0xffff;
+
+       do {
+               status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter);
+               if (ACPI_FAILURE(status) || !iter)
+                       return;
+
+               status = acpi_evaluate_integer(iter, "_ADR", NULL, &value);
+               if (ACPI_FAILURE(status) || value != id)
+                       continue;
+
+               handle_mux = iter;
+       } while (!handle_mux);
+
+       if (!handle_mux)
+               return;
+
+       status = acpi_evaluate_integer(handle_mux, "MXDM", NULL, &value);
+       if (ACPI_SUCCESS(status)) {
+               mode->acpiId = id;
+               mode->mode   = value;
+               mode->status = 0;
+       }
+
+       status = acpi_evaluate_integer(handle_mux, "MXDS", NULL, &value);
+       if (ACPI_SUCCESS(status)) {
+               part->acpiId = id;
+               part->mode   = value;
+               part->status = 0;
+       }
+}
+
+static void
+r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux)
+{
+       mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]);
+
+       for (int i = 0; i < mux->tableLen; i++) {
+               r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i],
+                                                                &mux->acpiIdMuxPartTable[i]);
+       }
+}
+
+static void
+r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
+{
+       acpi_status status;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *_DOD;
+
+       dod->status = 0xffff;
+
+       status = acpi_evaluate_object(handle, "_DOD", NULL, &output);
+       if (ACPI_FAILURE(status))
+               return;
+
+       _DOD = output.pointer;
+
+       if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) ||
+           WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList)))
+               return;
+
+       for (int i = 0; i < _DOD->package.count; i++) {
+               if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER))
+                       return;
+
+               dod->acpiIdList[i] = _DOD->package.elements[i].integer.value;
+               dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);
+       }
+
+       printk(KERN_ERR "_DOD: ok! len:%d\n", dod->acpiIdListLen);
+       dod->status = 0;
+}
+#endif
+
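+/* Gather the ACPI data GSP-RM wants as part of the system-info RPC: the _DOD
+ * display-output list, per-output mux (MXDM/MXDS) state, and the JT and
+ * Optimus (NVOP) capability _DSMs.  Only meaningful on x86/ACPI systems.
+ */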
+static void
+r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
+{
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+       acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
+
+       if (!handle)
+               return;
+
+       acpi->bValid = 1;
+
+       r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
+       if (acpi->dodMethodData.status == 0)
+               r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData);
+
+       r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
+       r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
+#endif
+}
+
+static int
+r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
+       GspSystemInfo *info;
+
+       if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
+               return -ENOSYS;
+
+       info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
+       if (IS_ERR(info))
+               return PTR_ERR(info);
+
+       info->gpuPhysAddr = device->func->resource_addr(device, 0);
+       info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
+       info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+       info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
+       info->maxUserVa = TASK_SIZE;
+       info->pciConfigMirrorBase = 0x088000;
+       info->pciConfigMirrorSize = 0x001000;
+       r535_gsp_acpi_info(gsp, &info->acpiMethodData);
+
+       return nvkm_gsp_rpc_wr(gsp, info, false);
+}
+
+static int
+r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       rpc_os_error_log_v17_00 *msg = repv;
+
+       if (WARN_ON(repc < sizeof(*msg)))
+               return -EINVAL;
+
+       nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString);
+       return 0;
+}
+
+static int
+r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+       rpc_rc_triggered_v17_02 *msg = repv;
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_chan *chan;
+       unsigned long flags;
+
+       if (WARN_ON(repc < sizeof(*msg)))
+               return -EINVAL;
+
+       nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
+                  msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
+                  msg->partitionAttributionId);
+
+       chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
+       if (!chan) {
+               nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
+               return 0;
+       }
+
+       nvkm_chan_error(chan, false);
+       nvkm_chan_put(&chan, flags);
+       return 0;
+}
+
+static int
+r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+
+       WARN_ON(repc != 0);
+
+       nvkm_error(subdev, "mmu fault queued\n");
+       return 0;
+}
+
+static int
+r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_gsp_client *client;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       rpc_post_event_v17_00 *msg = repv;
+
+       if (WARN_ON(repc < sizeof(*msg)))
+               return -EINVAL;
+       if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize))
+               return -EINVAL;
+
+       nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n",
+                  msg->hClient, msg->hEvent, msg->notifyIndex, msg->data,
+                  msg->status, msg->eventDataSize, msg->bNotifyList);
+
+       mutex_lock(&gsp->client_id.mutex);
+       client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff);
+       if (client) {
+               struct nvkm_gsp_event *event;
+               bool handled = false;
+
+               list_for_each_entry(event, &client->events, head) {
+                       if (event->object.handle == msg->hEvent) {
+                               event->func(event, msg->eventData, msg->eventDataSize);
+                               handled = true;
+                       }
+               }
+
+               if (!handled) {
+                       nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n",
+                                  msg->hClient, msg->hEvent);
+               }
+       } else {
+               nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient);
+       }
+       mutex_unlock(&gsp->client_id.mutex);
+       return 0;
+}
+
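+/* GSP-RM can ask the host CPU to execute a buffer of simple "sequencer"
+ * commands on its behalf.  Each command is an opcode plus payload: register
+ * write/modify/poll, delay, register store, and GSP falcon core
+ * reset/start/wait-for-halt/resume.
+ */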
+static int
+r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       rpc_run_cpu_sequencer_v17_00 *seq = repv;
+       int ptr = 0, ret;
+
+       nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex);
+
+       while (ptr < seq->cmdIndex) {
+               GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr];
+
+               ptr += 1;
+               ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);
+
+               switch (cmd->opCode) {
+               case GSP_SEQ_BUF_OPCODE_REG_WRITE: {
+                       u32 addr = cmd->payload.regWrite.addr;
+                       u32 data = cmd->payload.regWrite.val;
+
+                       nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data);
+                       nvkm_wr32(device, addr, data);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_REG_MODIFY: {
+                       u32 addr = cmd->payload.regModify.addr;
+                       u32 mask = cmd->payload.regModify.mask;
+                       u32 data = cmd->payload.regModify.val;
+
+                       nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data);
+                       nvkm_mask(device, addr, mask, data);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_REG_POLL: {
+                       u32 addr = cmd->payload.regPoll.addr;
+                       u32 mask = cmd->payload.regPoll.mask;
+                       u32 data = cmd->payload.regPoll.val;
+                       u32 usec = cmd->payload.regPoll.timeout ?: 4000000;
+                       //u32 error = cmd->payload.regPoll.error;
+
+                       nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec);
+                       nvkm_rd32(device, addr);
+                       nvkm_usec(device, usec,
+                               if ((nvkm_rd32(device, addr) & mask) == data)
+                                       break;
+                       );
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_DELAY_US: {
+                       u32 usec = cmd->payload.delayUs.val;
+
+                       nvkm_trace(subdev, "seq usec %d\n", usec);
+                       udelay(usec);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_REG_STORE: {
+                       u32 addr = cmd->payload.regStore.addr;
+                       u32 slot = cmd->payload.regStore.index;
+
+                       seq->regSaveArea[slot] = nvkm_rd32(device, addr);
+                       nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot,
+                                  seq->regSaveArea[slot]);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_RESET:
+                       nvkm_trace(subdev, "seq core reset\n");
+                       nvkm_falcon_reset(&gsp->falcon);
+                       nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080);
+                       nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000);
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_START:
+                       nvkm_trace(subdev, "seq core start\n");
+                       if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040)
+                               nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002);
+                       else
+                               nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002);
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT:
+                       nvkm_trace(subdev, "seq core wait halt\n");
+                       nvkm_msec(device, 2000,
+                               if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010)
+                                       break;
+                       );
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_RESUME: {
+                       struct nvkm_sec2 *sec2 = device->sec2;
+                       u32 mbox0;
+
+                       nvkm_trace(subdev, "seq core resume\n");
+
+                       ret = gsp->func->reset(gsp);
+                       if (WARN_ON(ret))
+                               return ret;
+
+                       nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+                       nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+
+                       nvkm_falcon_start(&sec2->falcon);
+
+                       if (nvkm_msec(device, 2000,
+                               if (nvkm_rd32(device, 0x1180f8) & 0x04000000)
+                                       break;
+                       ) < 0)
+                               return -ETIMEDOUT;
+
+                       mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040);
+                       if (WARN_ON(mbox0)) {
+                               nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0);
+                               return -EIO;
+                       }
+
+                       nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
+
+                       if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+                               return -EIO;
+               }
+                       break;
+               default:
+                       nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static void
+nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+{
+       if (mem->data) {
+               dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+               mem->data = NULL;
+       }
+}
+
+static int
+nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem)
+{
+       mem->size = size;
+       mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
+       if (WARN_ON(!mem->data))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int
+r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 wpr2_hi;
+       int ret;
+
+       wpr2_hi = nvkm_rd32(device, 0x1fa828);
+       if (!wpr2_hi) {
+               nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
+               return 0;
+       }
+
+       ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+       if (WARN_ON(ret))
+               return ret;
+
+       wpr2_hi = nvkm_rd32(device, 0x1fa828);
+       if (WARN_ON(wpr2_hi))
+               return -EIO;
+
+       return 0;
+}
+
+static int
+r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+       int ret;
+
+       ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+       if (ret)
+               return ret;
+
+       nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
+
+       if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+               return -EIO;
+
+       return 0;
+}
+
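+/* Fill in the GspFwWprMeta structure consumed by the booter: where in sysmem
+ * the radix3-mapped ELF, bootloader and signature live, and how the WPR2
+ * region in VRAM is laid out (heap, ELF, boot image, FRTS, VGA workspace).
+ */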
+static int
+r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+       GspFwWprMeta *meta;
+       int ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
+       if (ret)
+               return ret;
+
+       meta = gsp->wpr_meta.data;
+
+       meta->magic = GSP_FW_WPR_META_MAGIC;
+       meta->revision = GSP_FW_WPR_META_REVISION;
+
+       meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr;
+       meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
+
+       meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+       meta->sizeOfBootloader = gsp->boot.fw.size;
+       meta->bootloaderCodeOffset = gsp->boot.code_offset;
+       meta->bootloaderDataOffset = gsp->boot.data_offset;
+       meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+       meta->sysmemAddrOfSignature = gsp->sig.addr;
+       meta->sizeOfSignature = gsp->sig.size;
+
+       meta->gspFwRsvdStart = gsp->fb.heap.addr;
+       meta->nonWprHeapOffset = gsp->fb.heap.addr;
+       meta->nonWprHeapSize = gsp->fb.heap.size;
+       meta->gspFwWprStart = gsp->fb.wpr2.addr;
+       meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
+       meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
+       meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
+       meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
+       meta->frtsOffset = gsp->fb.wpr2.frts.addr;
+       meta->frtsSize = gsp->fb.wpr2.frts.size;
+       meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
+       meta->fbSize = gsp->fb.size;
+       meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
+       meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+       meta->bootCount = 0;
+       meta->partitionRpcAddr = 0;
+       meta->partitionRpcRequestOffset = 0;
+       meta->partitionRpcReplyOffset = 0;
+       meta->verified = 0;
+       return 0;
+}
+
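+/* Allocate the shared memory used to communicate with GSP-RM.  The buffer is
+ * laid out as [PTE array][command queue][message (status) queue]; the PTE
+ * array maps the whole allocation in GSP_PAGE_SIZE pages, and each queue
+ * begins with tx/rx headers, with ring entries starting one page in.
+ */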
+static int
+r535_gsp_shared_init(struct nvkm_gsp *gsp)
+{
+       struct {
+               msgqTxHeader tx;
+               msgqRxHeader rx;
+       } *cmdq, *msgq;
+       int ret, i;
+
+       gsp->shm.cmdq.size = 0x40000;
+       gsp->shm.msgq.size = 0x40000;
+
+       gsp->shm.ptes.nr  = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;
+       gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
+       gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
+
+       ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size +
+                                    gsp->shm.cmdq.size +
+                                    gsp->shm.msgq.size,
+                               &gsp->shm.mem);
+       if (ret)
+               return ret;
+
+       gsp->shm.ptes.ptr = gsp->shm.mem.data;
+       gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size;
+       gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size;
+
+       for (i = 0; i < gsp->shm.ptes.nr; i++)
+               gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT);
+
+       cmdq = gsp->shm.cmdq.ptr;
+       cmdq->tx.version = 0;
+       cmdq->tx.size = gsp->shm.cmdq.size;
+       cmdq->tx.entryOff = GSP_PAGE_SIZE;
+       cmdq->tx.msgSize = GSP_PAGE_SIZE;
+       cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize;
+       cmdq->tx.writePtr = 0;
+       cmdq->tx.flags = 1;
+       cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr);
+
+       msgq = gsp->shm.msgq.ptr;
+
+       gsp->cmdq.cnt = cmdq->tx.msgCount;
+       gsp->cmdq.wptr = &cmdq->tx.writePtr;
+       gsp->cmdq.rptr = &msgq->rx.readPtr;
+       gsp->msgq.cnt = cmdq->tx.msgCount;
+       gsp->msgq.wptr = &msgq->tx.writePtr;
+       gsp->msgq.rptr = &cmdq->rx.readPtr;
+       return 0;
+}
+
+static int
+r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+{
+       GSP_ARGUMENTS_CACHED *args;
+       int ret;
+
+       if (!resume) {
+               ret = r535_gsp_shared_init(gsp);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
+               if (ret)
+                       return ret;
+       }
+
+       args = gsp->rmargs.data;
+       args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
+       args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
+       args->messageQueueInitArguments.cmdQueueOffset =
+               (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
+       args->messageQueueInitArguments.statQueueOffset =
+               (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
+
+       if (!resume) {
+               args->srInitArguments.oldLevel = 0;
+               args->srInitArguments.flags = 0;
+               args->srInitArguments.bInPMTransition = 0;
+       } else {
+               args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+               args->srInitArguments.flags = 0;
+               args->srInitArguments.bInPMTransition = 1;
+       }
+
+       return 0;
+}
+
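+/* Pack up to the first eight characters of a region name into the u64
+ * identifier LibOS expects.
+ */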
+static inline u64
+r535_gsp_libos_id8(const char *name)
+{
+       u64 id = 0;
+
+       for (int i = 0; i < sizeof(id) && *name; i++, name++)
+               id = (id << 8) | *name;
+
+       return id;
+}
+
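+/* Fill a flat array of GSP-page-sized PTEs covering a DMA-contiguous buffer. */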
+static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
+{
+       unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
+       unsigned int i;
+
+       for (i = 0; i < num_pages; i++)
+               ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
+}
+
+static int
+r535_gsp_libos_init(struct nvkm_gsp *gsp)
+{
+       LibosMemoryRegionInitArgument *args;
+       int ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos);
+       if (ret)
+               return ret;
+
+       args = gsp->libos.data;
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit);
+       if (ret)
+               return ret;
+
+       args[0].id8  = r535_gsp_libos_id8("LOGINIT");
+       args[0].pa   = gsp->loginit.addr;
+       args[0].size = gsp->loginit.size;
+       args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[0].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size);
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr);
+       if (ret)
+               return ret;
+
+       args[1].id8  = r535_gsp_libos_id8("LOGINTR");
+       args[1].pa   = gsp->logintr.addr;
+       args[1].size = gsp->logintr.size;
+       args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[1].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size);
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm);
+       if (ret)
+               return ret;
+
+       args[2].id8  = r535_gsp_libos_id8("LOGRM");
+       args[2].pa   = gsp->logrm.addr;
+       args[2].size = gsp->logrm.size;
+       args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[2].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size);
+
+       ret = r535_gsp_rmargs_init(gsp, false);
+       if (ret)
+               return ret;
+
+       args[3].id8  = r535_gsp_libos_id8("RMARGS");
+       args[3].pa   = gsp->rmargs.addr;
+       args[3].size = gsp->rmargs.size;
+       args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[3].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       return 0;
+}
+
+void
+nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt)
+{
+       struct scatterlist *sgl;
+       int i;
+
+       dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+
+       for_each_sgtable_sg(sgt, sgl, i) {
+               struct page *page = sg_page(sgl);
+
+               __free_page(page);
+       }
+
+       sg_free_table(sgt);
+}
+
+int
+nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
+{
+       const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE);
+       struct scatterlist *sgl;
+       int ret, i;
+
+       ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
+       if (ret)
+               return ret;
+
+       for_each_sgtable_sg(sgt, sgl, i) {
+               struct page *page = alloc_page(GFP_KERNEL);
+
+               if (!page) {
+                       nvkm_gsp_sg_free(device, sgt);
+                       return -ENOMEM;
+               }
+
+               sg_set_page(sgl, page, PAGE_SIZE, 0);
+       }
+
+       ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+       if (ret)
+               nvkm_gsp_sg_free(device, sgt);
+
+       return ret;
+}
+
+static void
+nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
+{
+       for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
+               nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
+}
+
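+/* Build the three-level "radix3" page table GSP-RM uses to locate the firmware
+ * in sysmem.  The levels are filled from the bottom up: level 2 maps the
+ * firmware pages from the scatterlist, and each level above maps the pages of
+ * the level just built, leaving mem[0] as the root.
+ */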
+static int
+nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
+                  struct nvkm_gsp_radix3 *rx3)
+{
+       u64 addr;
+
+       for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
+               u64 *ptes;
+               int idx;
+
+               rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+               rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size,
+                                                     &rx3->mem[i].addr, GFP_KERNEL);
+               if (WARN_ON(!rx3->mem[i].data))
+                       return -ENOMEM;
+
+               ptes = rx3->mem[i].data;
+               if (i == 2) {
+                       struct scatterlist *sgl;
+
+                       for_each_sgtable_dma_sg(sgt, sgl, idx) {
+                               for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++)
+                                       *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j);
+                       }
+               } else {
+                       for (int j = 0; j < size / GSP_PAGE_SIZE; j++)
+                               *ptes++ = addr + GSP_PAGE_SIZE * j;
+               }
+
+               size = rx3->mem[i].size;
+               addr = rx3->mem[i].addr;
+       }
+
+       return 0;
+}
+
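+/* Shut down (or suspend) GSP-RM.  On suspend, a sysmem backup region sized to
+ * the WPR2 contents is allocated and described by a GspFwSRMeta structure whose
+ * address is handed to the booter-unload firmware via mbox0/1.
+ */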
+int
+r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+       u32 mbox0 = 0xff, mbox1 = 0xff;
+       int ret;
+
+       if (!gsp->running)
+               return 0;
+
+       if (suspend) {
+               GspFwWprMeta *meta = gsp->wpr_meta.data;
+               u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
+               GspFwSRMeta *sr;
+
+               ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta);
+               if (ret)
+                       return ret;
+
+               sr = gsp->sr.meta.data;
+               sr->magic = GSP_FW_SR_META_MAGIC;
+               sr->revision = GSP_FW_SR_META_REVISION;
+               sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr;
+               sr->sizeOfSuspendResumeData = len;
+
+               mbox0 = lower_32_bits(gsp->sr.meta.addr);
+               mbox1 = upper_32_bits(gsp->sr.meta.addr);
+       }
+
+       ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
+       if (WARN_ON(ret))
+               return ret;
+
+       nvkm_msec(gsp->subdev.device, 2000,
+               if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
+                       break;
+       );
+
+       nvkm_falcon_reset(&gsp->falcon);
+
+       ret = nvkm_gsp_fwsec_sb(gsp);
+       WARN_ON(ret);
+
+       ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
+       WARN_ON(ret);
+
+       gsp->running = false;
+       return 0;
+}
+
+int
+r535_gsp_init(struct nvkm_gsp *gsp)
+{
+       u32 mbox0, mbox1;
+       int ret;
+
+       if (!gsp->sr.meta.data) {
+               mbox0 = lower_32_bits(gsp->wpr_meta.addr);
+               mbox1 = upper_32_bits(gsp->wpr_meta.addr);
+       } else {
+               r535_gsp_rmargs_init(gsp, true);
+
+               mbox0 = lower_32_bits(gsp->sr.meta.addr);
+               mbox1 = upper_32_bits(gsp->sr.meta.addr);
+       }
+
+       /* Execute booter to handle (eventually...) booting GSP-RM. */
+       ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
+       if (WARN_ON(ret))
+               goto done;
+
+       ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
+       if (ret)
+               goto done;
+
+       gsp->running = true;
+
+done:
+       if (gsp->sr.meta.data) {
+               nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta);
+               nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+               nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
+               return ret;
+       }
+
+       if (ret == 0)
+               ret = r535_gsp_postinit(gsp);
+
+       return ret;
+}
+
+static int
+r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
+{
+       const struct firmware *fw = gsp->fws.bl;
+       const struct nvfw_bin_hdr *hdr;
+       RM_RISCV_UCODE_DESC *desc;
+       int ret;
+
+       hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
+       desc = (void *)fw->data + hdr->header_offset;
+
+       ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
+       if (ret)
+               return ret;
+
+       memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);
+
+       gsp->boot.code_offset = desc->monitorCodeOffset;
+       gsp->boot.data_offset = desc->monitorDataOffset;
+       gsp->boot.manifest_offset = desc->manifestOffset;
+       gsp->boot.app_version = desc->appVersion;
+       return 0;
+}
+
+static const struct nvkm_firmware_func
+r535_gsp_fw = {
+       .type = NVKM_FIRMWARE_IMG_SGT,
+};
+
+static int
+r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize)
+{
+       const u8 *img = gsp->fws.rm->data;
+       const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img;
+       const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff];
+       const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset];
+
+       for (int i = 0; i < ehdr->e_shnum; i++, shdr++) {
+               if (!strcmp(&names[shdr->sh_name], name)) {
+                       *pdata = &img[shdr->sh_offset];
+                       *psize = shdr->sh_size;
+                       return 0;
+               }
+       }
+
+       nvkm_error(&gsp->subdev, "section '%s' not found\n", name);
+       return -ENOENT;
+}
+
+static void
+r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
+{
+       nvkm_firmware_put(gsp->fws.bl);
+       gsp->fws.bl = NULL;
+       nvkm_firmware_put(gsp->fws.booter.unload);
+       gsp->fws.booter.unload = NULL;
+       nvkm_firmware_put(gsp->fws.booter.load);
+       gsp->fws.booter.load = NULL;
+       nvkm_firmware_put(gsp->fws.rm);
+       gsp->fws.rm = NULL;
+}
+
+void
+r535_gsp_dtor(struct nvkm_gsp *gsp)
+{
+       idr_destroy(&gsp->client_id.idr);
+       mutex_destroy(&gsp->client_id.mutex);
+
+       nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
+       nvkm_gsp_mem_dtor(gsp, &gsp->sig);
+       nvkm_firmware_dtor(&gsp->fw);
+
+       nvkm_falcon_fw_dtor(&gsp->booter.unload);
+       nvkm_falcon_fw_dtor(&gsp->booter.load);
+
+       mutex_destroy(&gsp->msgq.mutex);
+       mutex_destroy(&gsp->cmdq.mutex);
+
+       r535_gsp_dtor_fws(gsp);
+}
+
+int
+r535_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       const u8 *data;
+       u64 size;
+       int ret;
+
+       mutex_init(&gsp->cmdq.mutex);
+       mutex_init(&gsp->msgq.mutex);
+
+       ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
+                                    &device->sec2->falcon, &gsp->booter.load);
+       if (ret)
+               return ret;
+
+       ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
+                                    &device->sec2->falcon, &gsp->booter.unload);
+       if (ret)
+               return ret;
+
+       /* Load GSP firmware from ELF image into DMA-accessible memory. */
+       ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
+       if (ret)
+               return ret;
+
+       ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw);
+       if (ret)
+               return ret;
+
+       /* Load relevant signature from ELF image. */
+       ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig);
+       if (ret)
+               return ret;
+
+       memcpy(gsp->sig.data, data, size);
+
+       /* Build radix3 page table for ELF image. */
+       ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
+       if (ret)
+               return ret;
+
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
+                             r535_gsp_msg_run_cpu_sequencer, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
+                             r535_gsp_msg_rc_triggered, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
+                             r535_gsp_msg_mmu_fault_queued, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
+
+       ret = r535_gsp_rm_boot_ctor(gsp);
+       if (ret)
+               return ret;
+
+       /* Release FW images - we've copied them to DMA buffers now. */
+       r535_gsp_dtor_fws(gsp);
+
+       /* Calculate FB layout. */
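+       /* The layout is carved top-down from the end of VRAM: FRTS sits just
+        * below the VBIOS image, the boot image below FRTS, the ELF below the
+        * boot image, the WPR heap below the ELF, space for the WPR meta at the
+        * bottom of WPR2, and a 1MiB non-WPR heap just beneath that.
+        */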
+       gsp->fb.wpr2.frts.size = 0x100000;
+       gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
+
+       gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
+       gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
+
+       gsp->fb.wpr2.elf.size = gsp->fw.len;
+       gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
+
+       {
+               u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
+
+               gsp->fb.wpr2.heap.size =
+                       gsp->func->wpr_heap.os_carveout_size +
+                       gsp->func->wpr_heap.base_size +
+                       ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
+                       ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
+
+               gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
+       }
+
+       gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
+       gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
+
+       gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
+       gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
+
+       gsp->fb.heap.size = 0x100000;
+       gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
+
+       ret = nvkm_gsp_fwsec_frts(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_libos_init(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_wpr_meta_init(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_rpc_set_system_info(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_rpc_set_registry(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       /* Reset GSP into RISC-V mode. */
+       ret = gsp->func->reset(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+       nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+
+       mutex_init(&gsp->client_id.mutex);
+       idr_init(&gsp->client_id.idr);
+       return 0;
+}
+
+static int
+r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
+                const struct firmware **pfw)
+{
+       char fwname[64];
+
+       snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
+       return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
+}
+
+int
+r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       int ret;
+
+       if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
+               return -EINVAL;
+
+       if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
+           (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
+           (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
+           (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
+               r535_gsp_dtor_fws(gsp);
+               return ret;
+       }
+
+       return 0;
+}
+
+#define NVKM_GSP_FIRMWARE(chip)                                  \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin");   \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin");    \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")
+
+NVKM_GSP_FIRMWARE(tu102);
+NVKM_GSP_FIRMWARE(tu104);
+NVKM_GSP_FIRMWARE(tu106);
+
+NVKM_GSP_FIRMWARE(tu116);
+NVKM_GSP_FIRMWARE(tu117);
+
+NVKM_GSP_FIRMWARE(ga100);
+
+NVKM_GSP_FIRMWARE(ga102);
+NVKM_GSP_FIRMWARE(ga103);
+NVKM_GSP_FIRMWARE(ga104);
+NVKM_GSP_FIRMWARE(ga106);
+NVKM_GSP_FIRMWARE(ga107);
+
+NVKM_GSP_FIRMWARE(ad102);
+NVKM_GSP_FIRMWARE(ad103);
+NVKM_GSP_FIRMWARE(ad104);
+NVKM_GSP_FIRMWARE(ad106);
+NVKM_GSP_FIRMWARE(ad107);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
new file mode 100644 (file)
index 0000000..59c5f2b
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/fb.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+int
+tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
+                     struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       const struct nvkm_falcon_fw_func *func = &gm200_flcn_fw;
+       const struct nvfw_bin_hdr *hdr;
+       const struct nvfw_hs_header_v2 *hshdr;
+       const struct nvfw_hs_load_header_v2 *lhdr;
+       u32 loc, sig, cnt;
+       int ret;
+
+       hdr = nvfw_bin_hdr(subdev, blob->data);
+       hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
+       loc = *(u32 *)(blob->data + hshdr->patch_loc);
+       sig = *(u32 *)(blob->data + hshdr->patch_sig);
+       cnt = *(u32 *)(blob->data + hshdr->num_sig);
+
+       ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
+                                 blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
+       if (ret)
+               goto done;
+
+       ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
+                                 cnt, hshdr->sig_prod_offset + sig, 0, 0);
+       if (ret)
+               goto done;
+
+       lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
+
+       fw->nmem_base_img = 0;
+       fw->nmem_base = lhdr->os_code_offset;
+       fw->nmem_size = lhdr->os_code_size;
+       fw->imem_base_img = fw->nmem_size;
+       fw->imem_base = lhdr->app[0].offset;
+       fw->imem_size = lhdr->app[0].size;
+       fw->dmem_base_img = lhdr->os_data_offset;
+       fw->dmem_base = 0;
+       fw->dmem_size = lhdr->os_data_size;
+       fw->dmem_sign = loc - fw->dmem_base_img;
+       fw->boot_addr = lhdr->os_code_offset;
+
+done:
+       if (ret)
+               nvkm_falcon_fw_dtor(fw);
+
+       return ret;
+}
+
+static int
+tu102_gsp_fwsec_load_bld(struct nvkm_falcon_fw *fw)
+{
+       struct flcn_bl_dmem_desc_v2 desc = {
+               .ctx_dma = FALCON_DMAIDX_PHYS_SYS_NCOH,
+               .code_dma_base = fw->fw.phys,
+               .non_sec_code_off = fw->nmem_base,
+               .non_sec_code_size = fw->nmem_size,
+               .sec_code_off = fw->imem_base,
+               .sec_code_size = fw->imem_size,
+               .code_entry_point = 0,
+               .data_dma_base = fw->fw.phys + fw->dmem_base_img,
+               .data_size = fw->dmem_size,
+               .argc = 0,
+               .argv = 0,
+       };
+
+       flcn_bl_dmem_desc_v2_dump(fw->falcon->user, &desc);
+
+       nvkm_falcon_mask(fw->falcon, 0x600 + desc.ctx_dma * 4, 0x00000007, 0x00000005);
+
+       return nvkm_falcon_pio_wr(fw->falcon, (u8 *)&desc, 0, 0, DMEM, 0, sizeof(desc), 0, 0);
+}
+
+const struct nvkm_falcon_fw_func
+tu102_gsp_fwsec = {
+       .reset = gm200_flcn_fw_reset,
+       .load = gm200_flcn_fw_load,
+       .load_bld = tu102_gsp_fwsec_load_bld,
+       .boot = gm200_flcn_fw_boot,
+};
+
+int
+tu102_gsp_reset(struct nvkm_gsp *gsp)
+{
+       return gsp->falcon.func->reset_eng(&gsp->falcon);
+}
+
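+/* Locate the VBIOS/VGA workspace at the top of VRAM.  When a display engine is
+ * present this appears to be read back from a disp register (0x625f04);
+ * otherwise the top 1MiB of FB is assumed.
+ */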
+static u64
+tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       const u64 base = fb_size - 0x100000;
+       u64 addr = 0;
+
+       if (device->disp)
+               addr = nvkm_rd32(gsp->subdev.device, 0x625f04);
+       if (!(addr & 0x00000008))
+               return base;
+
+       addr = (addr & 0xffffff00) << 8;
+       if (addr < base)
+               return fb_size - 0x20000;
+
+       return addr;
+}
+
+int
+tu102_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+       gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+
+       gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
+       gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
+       gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
+       gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
+
+       return r535_gsp_oneinit(gsp);
+}
+
+const struct nvkm_falcon_func
+tu102_gsp_flcn = {
+       .disable = gm200_flcn_disable,
+       .enable = gm200_flcn_enable,
+       .addr2 = 0x1000,
+       .riscv_irqmask = 0x2b4,
+       .reset_eng = gp102_flcn_reset_eng,
+       .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
+       .bind_inst = gm200_flcn_bind_inst,
+       .bind_stat = gm200_flcn_bind_stat,
+       .bind_intr = true,
+       .imem_pio = &gm200_flcn_imem_pio,
+       .dmem_pio = &gm200_flcn_dmem_pio,
+       .riscv_active = tu102_flcn_riscv_active,
+};
+
+static const struct nvkm_gsp_func
+tu102_gsp_r535_113_01 = {
+       .flcn = &tu102_gsp_flcn,
+       .fwsec = &tu102_gsp_fwsec,
+
+       .sig_section = ".fwsignature_tu10x",
+
+       .wpr_heap.base_size = 8 << 20,
+       .wpr_heap.min_size = 64 << 20,
+
+       .booter.ctor = tu102_gsp_booter_ctor,
+
+       .dtor = r535_gsp_dtor,
+       .oneinit = tu102_gsp_oneinit,
+       .init = r535_gsp_init,
+       .fini = r535_gsp_fini,
+       .reset = tu102_gsp_reset,
+
+       .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+tu102_gsps[] = {
+       {  0,  r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" },
+       { -1, gv100_gsp_nofw, &gv100_gsp },
+       {}
+};
+
+int
+tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_gsp **pgsp)
+{
+       return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
new file mode 100644 (file)
index 0000000..04fbd9e
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+tu116_gsp_r535_113_01 = {
+       .flcn = &tu102_gsp_flcn,
+       .fwsec = &tu102_gsp_fwsec,
+
+       .sig_section = ".fwsignature_tu11x",
+
+       .wpr_heap.base_size = 8 << 20,
+       .wpr_heap.min_size = 64 << 20,
+
+       .booter.ctor = tu102_gsp_booter_ctor,
+
+       .dtor = r535_gsp_dtor,
+       .oneinit = tu102_gsp_oneinit,
+       .init = r535_gsp_init,
+       .fini = r535_gsp_fini,
+       .reset = tu102_gsp_reset,
+
+       .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+tu116_gsps[] = {
+       {  0,  r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" },
+       { -1, gv100_gsp_nofw, &gv100_gsp },
+       {}
+};
+
+int
+tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_gsp **pgsp)
+{
+       return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp);
+}
index 46917eb600f9dd3b929f53cf3feeb0b8c3684d47..0494775113128f3fb8981e202af94150e38e317a 100644 (file)
@@ -24,6 +24,8 @@
 #include "priv.h"
 #include "pad.h"
 
+#include <subdev/gsp.h>
+
 static void
 gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
 {
@@ -44,5 +46,8 @@ int
 gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_i2c **pi2c)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c);
 }
index 06cbe19ce3766e9f182be86268a6fea800b56ec5..553d540f27365a625828c519a2597de7a45083af 100644 (file)
@@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/instmem/nv04.o
 nvkm-y += nvkm/subdev/instmem/nv40.o
 nvkm-y += nvkm/subdev/instmem/nv50.o
 nvkm-y += nvkm/subdev/instmem/gk20a.o
+
+nvkm-y += nvkm/subdev/instmem/r535.o
index 24886eabe8dc3f2eaa655f39decb9e12d2b03f3f..a2cd3330efc66f418ae23c5ffb7ef3ee9afb57f6 100644 (file)
@@ -28,7 +28,7 @@
 /******************************************************************************
  * instmem object base implementation
  *****************************************************************************/
-static void
+void
 nvkm_instobj_load(struct nvkm_instobj *iobj)
 {
        struct nvkm_memory *memory = &iobj->memory;
@@ -48,7 +48,7 @@ nvkm_instobj_load(struct nvkm_instobj *iobj)
        iobj->suspend = NULL;
 }
 
-static int
+int
 nvkm_instobj_save(struct nvkm_instobj *iobj)
 {
        struct nvkm_memory *memory = &iobj->memory;
@@ -179,24 +179,14 @@ static int
 nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
 {
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
-       struct nvkm_instobj *iobj;
+       int ret;
 
        if (suspend) {
-               list_for_each_entry(iobj, &imem->list, head) {
-                       if (iobj->preserve) {
-                               int ret = nvkm_instobj_save(iobj);
-                               if (ret)
-                                       return ret;
-                       }
-               }
-
-               nvkm_bar_bar2_fini(subdev->device);
+               ret = imem->func->suspend(imem);
+               if (ret)
+                       return ret;
 
-               list_for_each_entry(iobj, &imem->boot, head) {
-                       int ret = nvkm_instobj_save(iobj);
-                       if (ret)
-                               return ret;
-               }
+               imem->suspend = true;
        }
 
        if (imem->func->fini)
@@ -209,20 +199,16 @@ static int
 nvkm_instmem_init(struct nvkm_subdev *subdev)
 {
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
-       struct nvkm_instobj *iobj;
 
-       list_for_each_entry(iobj, &imem->boot, head) {
-               if (iobj->suspend)
-                       nvkm_instobj_load(iobj);
-       }
+       if (imem->suspend) {
+               if (imem->func->resume)
+                       imem->func->resume(imem);
 
-       nvkm_bar_bar2_init(subdev->device);
-
-       list_for_each_entry(iobj, &imem->list, head) {
-               if (iobj->suspend)
-                       nvkm_instobj_load(iobj);
+               imem->suspend = false;
+               return 0;
        }
 
+       nvkm_bar_bar2_init(subdev->device);
        return 0;
 }
 
index a4ac94a2ab57fccc10e2c9e4956d823322edc265..1b811d6972a16df8c4335552b2655578510d38f8 100644 (file)
@@ -564,6 +564,8 @@ gk20a_instmem_dtor(struct nvkm_instmem *base)
 static const struct nvkm_instmem_func
 gk20a_instmem = {
        .dtor = gk20a_instmem_dtor,
+       .suspend = nv04_instmem_suspend,
+       .resume = nv04_instmem_resume,
        .memory_new = gk20a_instobj_new,
        .zero = false,
 };
index 25603b01d6f8421a1bb01d73559a1dfb87b0e2d7..e5320ef849bfc99cabf205bc68bb9faf07974176 100644 (file)
@@ -25,6 +25,7 @@
 #include "priv.h"
 
 #include <core/ramht.h>
+#include <subdev/bar.h>
 
 struct nv04_instmem {
        struct nvkm_instmem base;
@@ -154,6 +155,48 @@ nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
        nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
 }
 
+void
+nv04_instmem_resume(struct nvkm_instmem *imem)
+{
+       struct nvkm_instobj *iobj;
+
+       list_for_each_entry(iobj, &imem->boot, head) {
+               if (iobj->suspend)
+                       nvkm_instobj_load(iobj);
+       }
+
+       nvkm_bar_bar2_init(imem->subdev.device);
+
+       list_for_each_entry(iobj, &imem->list, head) {
+               if (iobj->suspend)
+                       nvkm_instobj_load(iobj);
+       }
+}
+
+int
+nv04_instmem_suspend(struct nvkm_instmem *imem)
+{
+       struct nvkm_instobj *iobj;
+
+       list_for_each_entry(iobj, &imem->list, head) {
+               if (iobj->preserve) {
+                       int ret = nvkm_instobj_save(iobj);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       nvkm_bar_bar2_fini(imem->subdev.device);
+
+       list_for_each_entry(iobj, &imem->boot, head) {
+               int ret = nvkm_instobj_save(iobj);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static int
 nv04_instmem_oneinit(struct nvkm_instmem *base)
 {
@@ -210,6 +253,8 @@ static const struct nvkm_instmem_func
 nv04_instmem = {
        .dtor = nv04_instmem_dtor,
        .oneinit = nv04_instmem_oneinit,
+       .suspend = nv04_instmem_suspend,
+       .resume = nv04_instmem_resume,
        .rd32 = nv04_instmem_rd32,
        .wr32 = nv04_instmem_wr32,
        .memory_new = nv04_instobj_new,
index 4b2d7465d22f75c3ca25cddf7f3a8730a2a51f74..a7f3fc342d87e03b031b5008d939c2eb46f49404 100644 (file)
@@ -27,6 +27,7 @@
 #include <core/memory.h>
 #include <subdev/bar.h>
 #include <subdev/fb.h>
+#include <subdev/gsp.h>
 #include <subdev/mmu.h>
 
 struct nv50_instmem {
@@ -394,24 +395,44 @@ nv50_instmem_fini(struct nvkm_instmem *base)
        nv50_instmem(base)->addr = ~0ULL;
 }
 
+static void *
+nv50_instmem_dtor(struct nvkm_instmem *base)
+{
+       return nv50_instmem(base);
+}
+
 static const struct nvkm_instmem_func
 nv50_instmem = {
+       .dtor = nv50_instmem_dtor,
        .fini = nv50_instmem_fini,
+       .suspend = nv04_instmem_suspend,
+       .resume = nv04_instmem_resume,
        .memory_new = nv50_instobj_new,
        .memory_wrap = nv50_instobj_wrap,
        .zero = false,
 };
 
 int
-nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
-                struct nvkm_instmem **pimem)
+nv50_instmem_new_(const struct nvkm_instmem_func *func,
+                 struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+                 struct nvkm_instmem **pimem)
 {
        struct nv50_instmem *imem;
 
        if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
                return -ENOMEM;
-       nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base);
+       nvkm_instmem_ctor(func, device, type, inst, &imem->base);
        INIT_LIST_HEAD(&imem->lru);
        *pimem = &imem->base;
        return 0;
 }
+
+int
+nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+                struct nvkm_instmem **pimem)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_instmem_new(&nv50_instmem, device, type, inst, pimem);
+
+       return nv50_instmem_new_(&nv50_instmem, device, type, inst, pimem);
+}
index 390ca00ab5678b3bac845eb00f0dc56b5920c3a4..4c14c96fb60a28cdf940784809c4050bda3dc24d 100644 (file)
@@ -7,6 +7,8 @@
 struct nvkm_instmem_func {
        void *(*dtor)(struct nvkm_instmem *);
        int (*oneinit)(struct nvkm_instmem *);
+       int (*suspend)(struct nvkm_instmem *);
+       void (*resume)(struct nvkm_instmem *);
        void (*fini)(struct nvkm_instmem *);
        u32  (*rd32)(struct nvkm_instmem *, u32 addr);
        void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
@@ -16,10 +18,19 @@ struct nvkm_instmem_func {
        bool zero;
 };
 
+int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *,
+                     enum nvkm_subdev_type, int, struct nvkm_instmem **);
+
 void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
                       enum nvkm_subdev_type, int, struct nvkm_instmem *);
 void nvkm_instmem_boot(struct nvkm_instmem *);
 
+int nv04_instmem_suspend(struct nvkm_instmem *);
+void nv04_instmem_resume(struct nvkm_instmem *);
+
+int r535_instmem_new(const struct nvkm_instmem_func *,
+                    struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+
 #include <core/memory.h>
 
 struct nvkm_instobj {
@@ -32,4 +43,6 @@ struct nvkm_instobj {
 void nvkm_instobj_ctor(const struct nvkm_memory_func *func,
                       struct nvkm_instmem *, struct nvkm_instobj *);
 void nvkm_instobj_dtor(struct nvkm_instmem *, struct nvkm_instobj *);
+int nvkm_instobj_save(struct nvkm_instobj *);
+void nvkm_instobj_load(struct nvkm_instobj *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
new file mode 100644 (file)
index 0000000..5f3c9c0
--- /dev/null
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+
+struct fbsr_item {
+       const char *type;
+       u64 addr;
+       u64 size;
+
+       struct list_head head;
+};
+
+struct fbsr {
+       struct list_head items;
+
+       u64 size;
+       int regions;
+
+       struct nvkm_gsp_client client;
+       struct nvkm_gsp_device device;
+
+       u64 hmemory;
+       u64 sys_offset;
+};
+
+static int
+fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
+            u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
+{
+       struct nvkm_gsp_client *client = device->object.client;
+       struct nvkm_gsp *gsp = client->gsp;
+       const u32 pages = size / GSP_PAGE_SIZE;
+       rpc_alloc_memory_v13_01 *rpc;
+       int ret;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
+                              sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0]));
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       rpc->hClient = client->object.handle;
+       rpc->hDevice = device->object.handle;
+       rpc->hMemory = handle;
+       if (aper == NVKM_MEM_TARGET_HOST) {
+               rpc->hClass = NV01_MEMORY_LIST_SYSTEM;
+               rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) |
+                            NVDEF(NVOS02, FLAGS, LOCATION, PCI) |
+                            NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+       } else {
+               rpc->hClass = NV01_MEMORY_LIST_FBMEM;
+               rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) |
+                            NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) |
+                            NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+               rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */
+       }
+       rpc->pteAdjust = 0;
+       rpc->length = size;
+       rpc->pageCount = pages;
+       rpc->pteDesc.idr = 0;
+       rpc->pteDesc.reserved1 = 0;
+       rpc->pteDesc.length = pages;
+
+       if (sgt) {
+               struct scatterlist *sgl;
+               int pte = 0, idx;
+
+               for_each_sgtable_dma_sg(sgt, sgl, idx) {
+                       for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++)
+                               rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i;
+
+               }
+       } else {
+               for (int i = 0; i < pages; i++)
+                       rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
+       }
+
+       ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
+       if (ret)
+               return ret;
+
+       object->client = device->object.client;
+       object->parent = &device->object;
+       object->handle = handle;
+       return 0;
+}
+
+static int
+fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
+{
+       NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl;
+       struct nvkm_gsp *gsp = fbsr->client.gsp;
+       struct nvkm_gsp_object memlist;
+       int ret;
+
+       ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
+                          item->addr, item->size, NULL, &memlist);
+       if (ret)
+               return ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl)) {
+               ret = PTR_ERR(ctrl);
+               goto done;
+       }
+
+       ctrl->fbsrType = FBSR_TYPE_DMA;
+       ctrl->hClient = fbsr->client.object.handle;
+       ctrl->hVidMem = fbsr->hmemory++;
+       ctrl->vidOffset = 0;
+       ctrl->sysOffset = fbsr->sys_offset;
+       ctrl->size = item->size;
+
+       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+done:
+       nvkm_gsp_rm_free(&memlist);
+       if (ret)
+               return ret;
+
+       fbsr->sys_offset += item->size;
+       return 0;
+}
+
+static int
+fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
+{
+       NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+       struct nvkm_gsp *gsp = fbsr->client.gsp;
+       struct nvkm_gsp_object memlist;
+       int ret;
+
+       ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
+                          0, fbsr->size, sgt, &memlist);
+       if (ret)
+               return ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->fbsrType = FBSR_TYPE_DMA;
+       ctrl->numRegions = fbsr->regions;
+       ctrl->hClient = fbsr->client.object.handle;
+       ctrl->hSysMem = fbsr->hmemory++;
+       ctrl->gspFbAllocsSysOffset = items_size;
+
+       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+       if (ret)
+               return ret;
+
+       nvkm_gsp_rm_free(&memlist);
+       return 0;
+}
+
+static bool
+fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size)
+{
+       struct fbsr_item *item;
+
+       if (!(item = kzalloc(sizeof(*item), GFP_KERNEL)))
+               return false;
+
+       item->type = type;
+       item->addr = addr;
+       item->size = size;
+       list_add_tail(&item->head, &fbsr->items);
+       return true;
+}
+
+static bool
+fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
+{
+       return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
+}
+
+static void
+r535_instmem_resume(struct nvkm_instmem *imem)
+{
+       /* RM has restored VRAM contents already, so just need to free the sysmem buffer. */
+       if (imem->rm.fbsr_valid) {
+               nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
+               imem->rm.fbsr_valid = false;
+       }
+}
+
+static int
+r535_instmem_suspend(struct nvkm_instmem *imem)
+{
+       struct nvkm_subdev *subdev = &imem->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_gsp *gsp = device->gsp;
+       struct nvkm_instobj *iobj;
+       struct fbsr fbsr = {};
+       struct fbsr_item *item, *temp;
+       u64 items_size;
+       int ret;
+
+       INIT_LIST_HEAD(&fbsr.items);
+       fbsr.hmemory = 0xcaf00003;
+
+       /* Create a list of all regions we need RM to save during suspend. */
+       list_for_each_entry(iobj, &imem->list, head) {
+               if (iobj->preserve) {
+                       if (!fbsr_inst(&fbsr, "inst", &iobj->memory))
+                               return -ENOMEM;
+               }
+       }
+
+       list_for_each_entry(iobj, &imem->boot, head) {
+               if (!fbsr_inst(&fbsr, "boot", &iobj->memory))
+                       return -ENOMEM;
+       }
+
+       if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size))
+               return -ENOMEM;
+
+       /* Determine memory requirements. */
+       list_for_each_entry(item, &fbsr.items, head) {
+               nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n",
+                          item->addr, item->size, item->type);
+               fbsr.size += item->size;
+               fbsr.regions++;
+       }
+
+       items_size = fbsr.size;
+       nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size);
+
+       fbsr.size += gsp->fb.rsvd_size;
+       fbsr.size += gsp->fb.bios.vga_workspace.size;
+       nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
+
+       ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
+       if (ret)
+               goto done;
+
+       /* Tell RM about the sysmem which will hold VRAM contents across suspend. */
+       ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device);
+       if (ret)
+               goto done_sgt;
+
+       ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
+       if (WARN_ON(ret))
+               goto done_sgt;
+
+       /* Send VRAM regions that need saving. */
+       list_for_each_entry(item, &fbsr.items, head) {
+               ret = fbsr_send(&fbsr, item);
+               if (WARN_ON(ret))
+                       goto done_sgt;
+       }
+
+       imem->rm.fbsr_valid = true;
+
+       /* Cleanup everything except the sysmem backup, which will be removed after resume. */
+done_sgt:
+       if (ret) /* ... unless we failed already. */
+               nvkm_gsp_sg_free(device, &imem->rm.fbsr);
+done:
+       list_for_each_entry_safe(item, temp, &fbsr.items, head) {
+               list_del(&item->head);
+               kfree(item);
+       }
+
+       nvkm_gsp_device_dtor(&fbsr.device);
+       nvkm_gsp_client_dtor(&fbsr.client);
+       return ret;
+}
+
+static void *
+r535_instmem_dtor(struct nvkm_instmem *imem)
+{
+       kfree(imem->func);
+       return imem;
+}
+
+int
+r535_instmem_new(const struct nvkm_instmem_func *hw,
+                struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+                struct nvkm_instmem **pinstmem)
+{
+       struct nvkm_instmem_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_instmem_dtor;
+       rm->fini = hw->fini;
+       rm->suspend = r535_instmem_suspend;
+       rm->resume  = r535_instmem_resume;
+       rm->memory_new = hw->memory_new;
+       rm->memory_wrap = hw->memory_wrap;
+       rm->zero = false;
+
+       ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
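
The constructor above is an instance of a pattern this series repeats for the MMU and VFN further down: instead of writing a second function table for GSP-RM, the hardware table is copied into a writable allocation, the hooks the firmware takes over are swapped out, and the copy is freed again through the dtor hook. Below is a minimal standalone sketch of that shape, with hypothetical example_* names and plain userspace C standing in for the kernel helpers; it is a sketch, not part of the patch.

/* Sketch of the "wrap the hardware func table for RM" pattern. */
#include <stdio.h>
#include <stdlib.h>

struct example_func {
	void (*dtor)(struct example_func *);
	int  (*suspend)(void);
};

static int  hw_suspend(void) { puts("hw suspend"); return 0; }
static int  rm_suspend(void) { puts("rm suspend"); return 0; }
static void rm_dtor(struct example_func *f) { free(f); }

static const struct example_func hw_func = { .suspend = hw_suspend };

static struct example_func *rm_wrap(const struct example_func *hw)
{
	struct example_func *rm = calloc(1, sizeof(*rm));

	if (!rm)
		return NULL;
	*rm = *hw;		/* inherit whatever the firmware does not own */
	rm->dtor = rm_dtor;	/* the copy is heap-allocated, so free it on teardown */
	rm->suspend = rm_suspend;
	return rm;
}

int main(void)
{
	struct example_func *f = rm_wrap(&hw_func);

	if (!f)
		return 1;
	f->suspend();		/* dispatches to the RM override */
	f->dtor(f);
	return 0;
}
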
index 159d9f8c95f34f621cb73dc51ff0514e9790ece6..951f01e3032a122ca4a84ae0078fa590433f9e20 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 ga102_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
 {
@@ -53,5 +55,8 @@ int
 ga102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_ltc **pltc)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_ltc_new_(&ga102_ltc, device, type, inst, pltc);
 }
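
This guard, and the identical ones added to the mc, pmu, privring, therm and top constructors further down, is how the series disables units that GSP-RM manages itself: the driver-side constructor declines to build anything and reports the unit as not present. A tiny standalone illustration of the idea follows, using hypothetical names and plain C in place of the nvkm constructor signature; it is a sketch, not the driver code.

/* Sketch: constructors bail out with -ENODEV when firmware owns the unit. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct gpu {
	bool gsp_rm;	/* stand-in for nvkm_gsp_rm(device->gsp) */
};

static int example_subdev_new(struct gpu *gpu)
{
	if (gpu->gsp_rm)
		return -ENODEV;	/* firmware-managed: skip the driver-side unit */
	puts("constructing driver-managed subdev");
	return 0;
}

int main(void)
{
	struct gpu gsp = { .gsp_rm = true }, bare = { .gsp_rm = false };

	printf("GSP-RM GPU: %d\n", example_subdev_new(&gsp));	/* -ENODEV */
	printf("bare GPU:   %d\n", example_subdev_new(&bare));	/* 0 */
	return 0;
}
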
index 265a05fd5f6bca4dcf164860d094155b4036ecfc..053302ecb0a5ef060a3f3c12a8f6bfb93685edac 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 void
 gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *ltc, int i, const u32 stencil)
 {
@@ -49,5 +51,8 @@ int
 gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_ltc **pltc)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc);
 }
index 5d28d30d09d5c512d8bcef50f7300ff75d9b57aa..65e9f04972dce3b19ac5dfb659d1b95d4eb9a6ee 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 ga100_mc_device_disable(struct nvkm_mc *mc, u32 mask)
 {
@@ -72,5 +74,8 @@ ga100_mc = {
 int
 ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc);
 }
index eb2ab03f43607353466e3bea0c5b616d21047cc9..05d2fa95e05eaca175aa090fa2513f8f16051dab 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 const struct nvkm_intr_data
 gp100_mc_intrs[] = {
        { NVKM_ENGINE_DISP    , 0, 0, 0x04000000, true },
@@ -98,5 +100,8 @@ gp100_mc = {
 int
 gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_mc_new_(&gp100_mc, device, type, inst, pmc);
 }
index a602b0cb5b31d9089781cd5961b0afad294f1b09..7ba35ea59c06d001109e59408f167f84103a8003 100644 (file)
@@ -16,6 +16,8 @@ nvkm-y += nvkm/subdev/mmu/gp10b.o
 nvkm-y += nvkm/subdev/mmu/gv100.o
 nvkm-y += nvkm/subdev/mmu/tu102.o
 
+nvkm-y += nvkm/subdev/mmu/r535.o
+
 nvkm-y += nvkm/subdev/mmu/mem.o
 nvkm-y += nvkm/subdev/mmu/memnv04.o
 nvkm-y += nvkm/subdev/mmu/memnv50.o
index ad3b44a9e0e71583fd154b653e7f9e2f03e77f80..b67ace7ae93ce4b49316507d05f0a2283a8e7dd1 100644 (file)
@@ -403,6 +403,10 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 
        nvkm_mmu_ptc_fini(mmu);
        mutex_destroy(&mmu->mutex);
+
+       if (mmu->func->dtor)
+               mmu->func->dtor(mmu);
+
        return mmu;
 }
 
index 5265bf4d8366c0133a0318b318f26071ac3262fa..e9ca6537778ce182e2313700187d4483600a2199 100644 (file)
@@ -4,12 +4,16 @@
 #define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
 #include <subdev/mmu.h>
 
+int r535_mmu_new(const struct nvkm_mmu_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int,
+                struct nvkm_mmu **);
+
 void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                   struct nvkm_mmu *);
 int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                  struct nvkm_mmu **);
 
 struct nvkm_mmu_func {
+       void (*dtor)(struct nvkm_mmu *);
        void (*init)(struct nvkm_mmu *);
 
        u8  dma_bits;
@@ -37,6 +41,8 @@ struct nvkm_mmu_func {
 
        const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
        bool kind_sys;
+
+       int (*promote_vmm)(struct nvkm_vmm *);
 };
 
 extern const struct nvkm_mmu_func nv04_mmu;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c
new file mode 100644 (file)
index 0000000..d3e9545
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+static int
+r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+{
+       NV_VASPACE_ALLOCATION_PARAMETERS *args;
+       int ret;
+
+       ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp,
+                                         &vmm->rm.client, &vmm->rm.device);
+       if (ret)
+               return ret;
+
+       args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
+                                    sizeof(*args), &vmm->rm.object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+
+       ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
+       if (ret)
+               return ret;
+
+       {
+               NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+
+               mutex_lock(&vmm->mutex.vmm);
+               ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
+                                         &vmm->rm.rsvd);
+               mutex_unlock(&vmm->mutex.vmm);
+               if (ret)
+                       return ret;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
+                                           NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->pageSize = 0x20000000;
+               ctrl->virtAddrLo = vmm->rm.rsvd->addr;
+               ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
+               ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
+               ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
+               ctrl->levels[0].size = 0x20;
+               ctrl->levels[0].aperture = 1;
+               ctrl->levels[0].pageShift = 0x2f;
+               ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
+               ctrl->levels[1].size = 0x1000;
+               ctrl->levels[1].aperture = 1;
+               ctrl->levels[1].pageShift = 0x26;
+               if (vmm->pd->pde[0]->pde[0]) {
+                       ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
+                       ctrl->levels[2].size = 0x1000;
+                       ctrl->levels[2].aperture = 1;
+                       ctrl->levels[2].pageShift = 0x1d;
+               }
+
+               ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
+       }
+
+       return ret;
+}
+
+static void
+r535_mmu_dtor(struct nvkm_mmu *mmu)
+{
+       kfree(mmu->func);
+}
+
+int
+r535_mmu_new(const struct nvkm_mmu_func *hw,
+            struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+            struct nvkm_mmu **pmmu)
+{
+       struct nvkm_mmu_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_mmu_dtor;
+       rm->dma_bits = hw->dma_bits;
+       rm->mmu = hw->mmu;
+       rm->mem = hw->mem;
+       rm->vmm = hw->vmm;
+       rm->kind = hw->kind;
+       rm->kind_sys = hw->kind_sys;
+       rm->promote_vmm = r535_mmu_promote_vmm;
+
+       ret = nvkm_mmu_new_(rm, device, type, inst, pmmu);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
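
For readers decoding the constants in r535_mmu_promote_vmm() above: assuming each pageShift gives the virtual-address bit at which that directory level starts mapping, the numbers are internally consistent. 1 << 0x1d is the 0x20000000-byte (512 MiB) reservation granularity, and the shifts 0x2f/0x26/0x1d (47/38/29) sit 9 bits apart, which matches 512 entries per 0x1000-byte directory of 8-byte PDEs. The short standalone check below just spells that arithmetic out; it is not part of the patch.

/* Standalone check of the page-shift constants passed to RM above. */
#include <stdio.h>

int main(void)
{
	const unsigned shift[] = { 0x2f, 0x26, 0x1d };	/* 47, 38, 29 */

	printf("page size at shift 0x%x: 0x%llx (%llu MiB)\n",
	       shift[2], 1ULL << shift[2], (1ULL << shift[2]) >> 20);
	for (int i = 0; i < 2; i++)
		printf("level %d covers %u more VA bits than level %d\n",
		       i, shift[i] - shift[i + 1], i + 1);
	printf("entries per 0x1000-byte directory: %d\n", 0x1000 / 8);
	return 0;
}
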
index 8d060ce47f8657aeef67c7533f0f24f2d4e2930e..df662ce4a4b0173bc40cced588cf6dd71aa0de03 100644 (file)
@@ -24,6 +24,7 @@
 #include "vmm.h"
 
 #include <core/option.h>
+#include <subdev/gsp.h>
 
 #include <nvif/class.h>
 
@@ -54,5 +55,8 @@ int
 tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_mmu **pmmu)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_mmu_new(&tu102_mmu, device, type, inst, pmmu);
+
        return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu);
 }
index 8e459d88ff8f89426e163e43069a1a0698b9c44f..cf490ff2b9f142be86ab9ff8944b0155268e0bd0 100644 (file)
@@ -572,6 +572,12 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
        }
        uvmm->vmm->managed.raw = raw;
 
+       if (mmu->func->promote_vmm) {
+               ret = mmu->func->promote_vmm(uvmm->vmm);
+               if (ret)
+                       return ret;
+       }
+
        page = uvmm->vmm->func->page;
        args->v0.page_nr = 0;
        while (page && (page++)->shift)
index eb5fcadcb39aa66caaf27308eb36de602a483019..9c97800fe03777e27fe9926b1bc9c86041843a94 100644 (file)
@@ -1030,6 +1030,13 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
        struct nvkm_vma *vma;
        struct rb_node *node;
 
+       if (vmm->rm.client.gsp) {
+               nvkm_gsp_rm_free(&vmm->rm.object);
+               nvkm_gsp_device_dtor(&vmm->rm.device);
+               nvkm_gsp_client_dtor(&vmm->rm.client);
+               nvkm_vmm_put(vmm, &vmm->rm.rsvd);
+       }
+
        if (0)
                nvkm_vmm_dump(vmm);
 
index 0095d58d4d9a10ef88e173b176bf5d2fddb06bc1..e34bc60764010f8307a01ce4fe21808bb99e5e43 100644 (file)
@@ -35,9 +35,11 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 
        mutex_lock(&vmm->mmu->mutex);
 
-       nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+       if (!vmm->rm.bar2_pdb)
+               nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+       else
+               nvkm_wr32(device, 0xb830a0, vmm->rm.bar2_pdb >> 8);
        nvkm_wr32(device, 0xb830a4, 0x00000000);
-       nvkm_wr32(device, 0x100e68, 0x00000000);
        nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
 
        nvkm_msec(device, 2000,
index cd3148360996f24964f9f892524672ddacbc8168..da5b2b2190d3d6f741dc1f7eb06dc3ca3519bfcb 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static const struct nvkm_falcon_func
 gp102_pmu_flcn = {
        .disable = gm200_flcn_disable,
@@ -54,5 +56,8 @@ int
 gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_pmu **ppmu)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu);
 }
index b4eaf6db36d728fda55143f85d5ab61b9212beb9..b4530073bfdc44ed8f9b3abe6935cf8b533b8026 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static const struct nvkm_subdev_func
 gm200_privring = {
        .intr = gk104_privring_intr,
@@ -32,5 +34,8 @@ int
 gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                   struct nvkm_subdev **pprivring)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_subdev_new_(&gm200_privring, device, type, inst, pprivring);
 }
index 44f021392b955d9f708bd1c77475b6680c22dd31..5392833d361483950259464673a8e7a3d464628f 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static int
 gp100_temp_get(struct nvkm_therm *therm)
 {
@@ -52,5 +54,8 @@ int
 gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                struct nvkm_therm **ptherm)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm);
 }
index 84790cf52b9029b97cc62da1e08d9f8ae1a96950..129eabb8b9e60802c820013e2dcfc159bab4e73e 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static int
 ga100_top_parse(struct nvkm_top *top)
 {
@@ -76,7 +78,7 @@ ga100_top_parse(struct nvkm_top *top)
                case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
                case 0x00000013: I_(NVKM_ENGINE_CE    , inst); break;
                case 0x00000014: O_(NVKM_SUBDEV_GSP   ,    0); break;
-               case 0x00000015: O_(NVKM_ENGINE_NVJPG ,    0); break;
+               case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break;
                case 0x00000016: O_(NVKM_ENGINE_OFA   ,    0); break;
                case 0x00000017: O_(NVKM_SUBDEV_FLA   ,    0); break;
                        break;
@@ -104,5 +106,8 @@ int
 ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_top **ptop)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_top_new_(&ga100_top, device, type, inst, ptop);
 }
index 2bbba8244cbf14354007984206b39ac417cd7123..da55dac8c286a4faf914068216a5b72db5c135bb 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static int
 gk104_top_parse(struct nvkm_top *top)
 {
@@ -89,7 +91,7 @@ gk104_top_parse(struct nvkm_top *top)
                case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
                case 0x00000013: I_(NVKM_ENGINE_CE    , inst); break;
                case 0x00000014: O_(NVKM_SUBDEV_GSP   ,    0); break;
-               case 0x00000015: O_(NVKM_ENGINE_NVJPG ,    0); break;
+               case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break;
                default:
                        break;
                }
@@ -115,5 +117,8 @@ int
 gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_top **ptop)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_top_new_(&gk104_top, device, type, inst, ptop);
 }
index 23cd21b40a25e463c52480e6e859290898e86a72..23a85460615f267ccba7fb75e3b8a9c080d7f19e 100644 (file)
@@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/vfn/uvfn.o
 nvkm-y += nvkm/subdev/vfn/gv100.o
 nvkm-y += nvkm/subdev/vfn/tu102.o
 nvkm-y += nvkm/subdev/vfn/ga100.o
+
+nvkm-y += nvkm/subdev/vfn/r535.o
index fd5c6931322dc57c0b3d6fbd5d7ef0aee49d57f5..bb0bb6fda54b9d353ded9ebedb4dc81d73a2890a 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_intr_data
@@ -43,5 +45,8 @@ int
 ga100_vfn_new(struct nvkm_device *device,
              enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_vfn_new(&ga100_vfn, device, type, inst, 0xb80000, pvfn);
+
        return nvkm_vfn_new_(&ga100_vfn, device, type, inst, 0xb80000, pvfn);
 }
index 96d53c02041b582449854a3992136484e80cef13..3a09781ad0320452c27009be5244c7ca4edbb627 100644 (file)
@@ -5,16 +5,21 @@
 #include <subdev/vfn.h>
 
 struct nvkm_vfn_func {
+       void (*dtor)(struct nvkm_vfn *);
+
        const struct nvkm_intr_func *intr;
        const struct nvkm_intr_data *intrs;
 
        struct {
                u32 addr;
                u32 size;
-               const struct nvkm_sclass base;
+               struct nvkm_sclass base;
        } user;
 };
 
+int r535_vfn_new(const struct nvkm_vfn_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int,
+                u32 addr, struct nvkm_vfn **);
+
 int nvkm_vfn_new_(const struct nvkm_vfn_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                  u32 addr, struct nvkm_vfn **);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
new file mode 100644 (file)
index 0000000..dce3373
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+r535_vfn_dtor(struct nvkm_vfn *vfn)
+{
+       kfree(vfn->func);
+}
+
+int
+r535_vfn_new(const struct nvkm_vfn_func *hw,
+            struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr,
+            struct nvkm_vfn **pvfn)
+{
+       struct nvkm_vfn_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_vfn_dtor;
+       rm->intr = hw->intr;
+       rm->user = hw->user;
+
+       ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 3d063fb5e136612c2995933bf23ed13b65219f25..a3bf13c5c79b133aef3eb71f8434091ab602f2d7 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static void
@@ -104,5 +106,8 @@ int
 tu102_vfn_new(struct nvkm_device *device,
              enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_vfn_new(&tu102_vfn, device, type, inst, 0xb80000, pvfn);
+
        return nvkm_vfn_new_(&tu102_vfn, device, type, inst, 0xb80000, pvfn);
 }