/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
22 #define tu102_mc(p) container_of((p), struct tu102_mc, base)
33 tu102_mc_intr_update(struct tu102_mc *mc)
35 struct nvkm_device *device = mc->base.subdev.device;
36 u32 mask = mc->intr ? mc->mask : 0, i;
38 for (i = 0; i < 2; i++) {
39 nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
40 nvkm_wr32(device, 0x000160 + (i * 0x04), mask);
43 if (mask & 0x00000200)
44 nvkm_wr32(device, 0xb81608, 0x6);
46 nvkm_wr32(device, 0xb81610, 0x6);
50 tu102_mc_intr_unarm(struct nvkm_mc *base)
52 struct tu102_mc *mc = tu102_mc(base);
55 spin_lock_irqsave(&mc->lock, flags);
57 tu102_mc_intr_update(mc);
58 spin_unlock_irqrestore(&mc->lock, flags);
62 tu102_mc_intr_rearm(struct nvkm_mc *base)
64 struct tu102_mc *mc = tu102_mc(base);
67 spin_lock_irqsave(&mc->lock, flags);
69 tu102_mc_intr_update(mc);
70 spin_unlock_irqrestore(&mc->lock, flags);
74 tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
76 struct tu102_mc *mc = tu102_mc(base);
79 spin_lock_irqsave(&mc->lock, flags);
80 mc->mask = (mc->mask & ~mask) | intr;
81 tu102_mc_intr_update(mc);
82 spin_unlock_irqrestore(&mc->lock, flags);
86 tu102_mc_intr_stat(struct nvkm_mc *mc)
88 struct nvkm_device *device = mc->subdev.device;
89 u32 intr0 = nvkm_rd32(device, 0x000100);
90 u32 intr1 = nvkm_rd32(device, 0x000104);
91 u32 intr_top = nvkm_rd32(device, 0xb81600);
93 /* Turing and above route the MMU fault interrupts via a different
94 * interrupt tree with different control registers. For the moment remap
95 * them back to the old PMC vector.
97 if (intr_top & 0x00000006)
100 return intr0 | intr1;
104 static const struct nvkm_mc_func
106 .init = nv50_mc_init,
107 .intr = gp100_mc_intr,
108 .intr_unarm = tu102_mc_intr_unarm,
109 .intr_rearm = tu102_mc_intr_rearm,
110 .intr_mask = tu102_mc_intr_mask,
111 .intr_stat = tu102_mc_intr_stat,
112 .reset = gk104_mc_reset,
116 tu102_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
117 enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
121 if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
123 nvkm_mc_ctor(func, device, type, inst, &mc->base);
126 spin_lock_init(&mc->lock);
128 mc->mask = 0x7fffffff;
133 tu102_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
135 return tu102_mc_new_(&tu102_mc, device, type, inst, pmc);