4 #include <core/memory.h>
7 /* Some GPUs have a mapping level with dual page tables to
8 * support large and small pages in the same address-range.
10 * We track the state of both page tables in one place, which
11 * is why there are multiple PT pointers/refcounts here.
13 struct nvkm_mmu_pt *pt[2];
16 /* Page size handled by this PT.
18 * Tesla backend needs to know this when writing PDEs,
19 * otherwise unnecessary.
23 /* Entire page table sparse.
25 * Used to propagate sparseness to child page tables.
29 /* Tracking for page directories.
31 * The array is indexed by PDE, and will either point to the
32 * child page table, or indicate the PDE is marked as sparse.
34 #define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
35 #define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
36 #define NVKM_VMM_PDE_SPARSE ERR_PTR(-EBUSY)
37 struct nvkm_vmm_pt **pde;
39 /* Tracking for dual page tables.
41 * There's one entry for each LPTE, keeping track of whether
42 * there are valid SPTEs in the same address-range.
44 * This information is used to manage LPTE state transitions.
46 #define NVKM_VMM_PTE_SPARSE 0x80
47 #define NVKM_VMM_PTE_VALID 0x40
48 #define NVKM_VMM_PTE_SPTES 0x3f
52 struct nvkm_vmm_desc_func {
55 struct nvkm_vmm_desc {
62 u8 bits; /* VMA bits covered by PT. */
63 u8 size; /* Bytes-per-PTE. */
64 u32 align; /* PT address alignment. */
65 const struct nvkm_vmm_desc_func *func;
68 struct nvkm_vmm_page {
70 const struct nvkm_vmm_desc *desc;
71 #define NVKM_VMM_PAGE_SPARSE 0x01
72 #define NVKM_VMM_PAGE_VRAM 0x02
73 #define NVKM_VMM_PAGE_HOST 0x04
74 #define NVKM_VMM_PAGE_COMP 0x08
75 #define NVKM_VMM_PAGE_Sxxx (NVKM_VMM_PAGE_SPARSE)
76 #define NVKM_VMM_PAGE_xVxx (NVKM_VMM_PAGE_VRAM)
77 #define NVKM_VMM_PAGE_SVxx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
78 #define NVKM_VMM_PAGE_xxHx (NVKM_VMM_PAGE_HOST)
79 #define NVKM_VMM_PAGE_SxHx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
80 #define NVKM_VMM_PAGE_xVHx (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
81 #define NVKM_VMM_PAGE_SVHx (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
82 #define NVKM_VMM_PAGE_xVxC (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
83 #define NVKM_VMM_PAGE_SVxC (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
84 #define NVKM_VMM_PAGE_xxHC (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
85 #define NVKM_VMM_PAGE_SxHC (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
89 struct nvkm_vmm_func {
90 int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
91 void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);
94 const struct nvkm_vmm_page page[];
/* Create a new VMM using the given per-backend function table; the result
 * is returned through the trailing struct nvkm_vmm ** out-parameter.
 * NOTE(review): the **-out-param (vs. the * taken by nvkm_vmm_ctor() below)
 * implies this path also allocates the object — confirm in vmm.c. */
97 int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
98 u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
99 const char *name, struct nvkm_vmm **);
/* Initialise a caller-provided VMM object (takes struct nvkm_vmm *, not **);
 * same argument list as the allocating variant above.  The lock_class_key is
 * presumably for lockdep classification of the VMM's lock — standard use of
 * struct lock_class_key, but verify against the definition in vmm.c. */
100 int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
101 u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
102 const char *name, struct nvkm_vmm *);
/* Destructor counterpart to nvkm_vmm_ctor(); tears down VMM state.
 * NOTE(review): as a ctor/dtor pair it likely does not free the object
 * itself — confirm in vmm.c before relying on that. */
103 void nvkm_vmm_dtor(struct nvkm_vmm *);
/* NV04-family VMM constructor taking an explicit function table, plus an
 * opaque argument blob (void *, u32 length) — presumably user creation
 * args forwarded to the backend; TODO confirm against callers. */
105 int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
106 u64, u64, void *, u32, struct lock_class_key *,
107 const char *, struct nvkm_vmm **);
/* Per-chipset VMM constructor for NV04-class hardware; same argument
 * shape as nv04_vmm_new_() minus the explicit function table. */
109 int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
110 struct lock_class_key *, const char *, struct nvkm_vmm **);
/* Per-chipset VMM constructor for NV41-class hardware. */
111 int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
112 struct lock_class_key *, const char *, struct nvkm_vmm **);
/* Per-chipset VMM constructor for NV44-class hardware. */
113 int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
114 struct lock_class_key *, const char *, struct nvkm_vmm **);