forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-05-11
Commit: 04dd17822334871b23ea2862f7798fb0e0007777
File:   kernel/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -25,14 +25,15 @@
 #include <subdev/mmu.h>
 #include <engine/fifo.h>
 
+#include <nvif/class.h>
+
 static void
 gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
 {
         struct nvkm_device *device = buffer->fault->subdev.device;
         struct nvkm_memory *mem = buffer->mem;
-        const u32 foff = buffer->id * 0x14;
-        u32 get = nvkm_rd32(device, 0x100e2c + foff);
-        u32 put = nvkm_rd32(device, 0x100e30 + foff);
+        u32 get = nvkm_rd32(device, buffer->get);
+        u32 put = nvkm_rd32(device, buffer->put);
         if (put == get)
                 return;
 
@@ -51,7 +52,7 @@
 
                 if (++get == buffer->entries)
                         get = 0;
-                nvkm_wr32(device, 0x100e2c + foff, get);
+                nvkm_wr32(device, buffer->get, get);
 
                 info.addr = ((u64)addrhi << 32) | addrlo;
                 info.inst = ((u64)insthi << 32) | instlo;
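
Taken together, the two hunks above stop recomputing the GET/PUT register offsets on every pass and instead read them from fields cached on the buffer by gv100_fault_buffer_info() further down. For reference, a minimal stand-alone model of the same GET/PUT ring discipline; the register file and helper names here are hypothetical, only the wrap and write-back logic mirrors the patch:

/* Stand-alone model of the GET/PUT ring drain above (hypothetical
 * harness; only the wrap/write-back discipline mirrors the patch). */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];               /* [0] = GET index, [1] = PUT index */
static uint32_t rd32(int r) { return regs[r]; }
static void wr32(int r, uint32_t v) { regs[r] = v; }

static void drain(uint32_t entries)
{
        uint32_t get = rd32(0);
        uint32_t put = rd32(1);

        if (put == get)                /* ring empty, nothing to process */
                return;

        while (get != put) {
                /* ...decode one fault entry at index 'get' here... */
                if (++get == entries)  /* wrap at the end of the ring */
                        get = 0;
                wr32(0, get);          /* tell HW the entry was consumed */
        }
}

int main(void)
{
        regs[0] = 30; regs[1] = 2;     /* PUT has wrapped past the end */
        drain(32);                     /* consumes entries 30, 31, 0, 1 */
        printf("GET now %u\n", (unsigned)rd32(0));
        return 0;
}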
@@ -70,13 +71,21 @@
 }
 
 static void
-gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+gv100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
 {
         struct nvkm_device *device = buffer->fault->subdev.device;
         const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
-        const u32 foff = buffer->id * 0x14;
+        if (enable)
+                nvkm_mask(device, 0x100a2c, intr, intr);
+        else
+                nvkm_mask(device, 0x100a34, intr, intr);
+}
 
-        nvkm_mask(device, 0x100a34, intr, intr);
+static void
+gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+        struct nvkm_device *device = buffer->fault->subdev.device;
+        const u32 foff = buffer->id * 0x14;
         nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
 }
 
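
The new gv100_fault_buffer_intr() hook drives 0x100a2c and 0x100a34 as what looks like a set/clear pair for the per-buffer interrupt enables, matching how gv100_fault_init() and gv100_fault_fini() below touch bit 31 of the same two registers. A toy model of that pattern, with invented names:

/* Toy model of a set/clear interrupt-enable register pair (names
 * invented). Writing 1-bits to SET enables those lines; writing
 * 1-bits to CLR disables them; 0-bits leave the state untouched. */
#include <stdint.h>
#include <stdio.h>

static uint32_t intr_en;                       /* effective enable mask */

static void intr_en_set(uint32_t bits) { intr_en |=  bits; }
static void intr_en_clr(uint32_t bits) { intr_en &= ~bits; }

static void buffer_intr(int id, int enable)
{
        const uint32_t intr = id ? 0x08000000 : 0x20000000;
        if (enable)
                intr_en_set(intr);
        else
                intr_en_clr(intr);
}

int main(void)
{
        buffer_intr(0, 1);
        buffer_intr(1, 1);
        buffer_intr(0, 0);                     /* leaves buffer 1 enabled */
        printf("enables: %08x\n", (unsigned)intr_en); /* 08000000 */
        return 0;
}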
@@ -84,23 +93,25 @@
 gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 {
         struct nvkm_device *device = buffer->fault->subdev.device;
-        const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
         const u32 foff = buffer->id * 0x14;
 
         nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
-        nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->vma->addr));
-        nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->vma->addr));
+        nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->addr));
+        nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->addr));
         nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
-        nvkm_mask(device, 0x100a2c, intr, intr);
 }
 
-static u32
-gv100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+static void
+gv100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 {
         struct nvkm_device *device = buffer->fault->subdev.device;
         const u32 foff = buffer->id * 0x14;
+
         nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);
-        return nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+
+        buffer->entries = nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+        buffer->get = 0x100e2c + foff;
+        buffer->put = 0x100e30 + foff;
 }
 
 static int
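
gv100_fault_buffer_info() makes the register layout explicit: each buffer owns a 0x14-byte block starting at 0x100e24, selected by foff = buffer->id * 0x14, and the cached GET/PUT addresses are exactly what gv100_fault_buffer_process() now reads. A quick sketch that prints the implied map; the offsets come from the patch, the register names are guesses:

/* Print the per-buffer register map implied by foff = id * 0x14.
 * Addresses come from the patch; the names are guesses. */
#include <stdio.h>

int main(void)
{
        for (int id = 0; id < 2; id++) {
                const unsigned foff = id * 0x14;
                printf("buffer %d: ADDR_LO %06x ADDR_HI %06x "
                       "GET %06x PUT %06x CTRL %06x\n", id,
                       0x100e24 + foff, 0x100e28 + foff,
                       0x100e2c + foff, 0x100e30 + foff,
                       0x100e34 + foff);
        }
        return 0;
}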
@@ -157,6 +168,13 @@
                 }
         }
 
+        if (stat & 0x08000000) {
+                if (fault->buffer[1]) {
+                        nvkm_event_send(&fault->event, 1, 1, NULL, 0);
+                        stat &= ~0x08000000;
+                }
+        }
+
         if (stat) {
                 nvkm_debug(subdev, "intr %08x\n", stat);
         }
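
This mirrors the existing buffer[0] branch: bit 0x08000000 is buffer 1's interrupt line (per the intr mask in gv100_fault_buffer_intr() above), so the handler forwards an event for index 1 and clears the bit, leaving only genuinely unhandled bits for the nvkm_debug() fallthrough. A compact model of that handle-and-clear dispatch, with event_send() standing in for nvkm_event_send():

/* Model of the handle-and-clear status dispatch above: each known
 * bit is serviced and removed from 'stat'; leftovers get logged. */
#include <stdint.h>
#include <stdio.h>

static void event_send(int index) { printf("event -> buffer %d\n", index); }

static void fault_intr(uint32_t stat, int have_buf0, int have_buf1)
{
        if ((stat & 0x20000000) && have_buf0) {
                event_send(0);
                stat &= ~0x20000000;
        }
        if ((stat & 0x08000000) && have_buf1) {
                event_send(1);
                stat &= ~0x08000000;
        }
        if (stat)
                printf("intr %08x unhandled\n", (unsigned)stat);
}

int main(void)
{
        fault_intr(0x28000001, 1, 1);  /* both buffers plus one stray bit */
        return 0;
}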
@@ -166,6 +184,8 @@
 gv100_fault_fini(struct nvkm_fault *fault)
 {
         nvkm_notify_put(&fault->nrpfb);
+        if (fault->buffer[0])
+                fault->func->buffer.fini(fault->buffer[0]);
         nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
 }
 
@@ -173,14 +193,15 @@
 gv100_fault_init(struct nvkm_fault *fault)
 {
         nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
+        fault->func->buffer.init(fault->buffer[0]);
         nvkm_notify_get(&fault->nrpfb);
 }
 
-static int
+int
 gv100_fault_oneinit(struct nvkm_fault *fault)
 {
         return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
-                                gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+                                gv100_fault_ntfy_nrpfb, true, NULL, 0, 0,
                                 &fault->nrpfb);
 }
 
@@ -192,9 +213,18 @@
         .intr = gv100_fault_intr,
         .buffer.nr = 2,
         .buffer.entry_size = 32,
-        .buffer.entries = gv100_fault_buffer_entries,
+        .buffer.info = gv100_fault_buffer_info,
+        .buffer.pin = gp100_fault_buffer_pin,
         .buffer.init = gv100_fault_buffer_init,
         .buffer.fini = gv100_fault_buffer_fini,
+        .buffer.intr = gv100_fault_buffer_intr,
+        /*TODO: Figure out how to expose non-replayable fault buffer, which,
+         * for some reason, is where recoverable CE faults appear...
+         *
+         * It's a bit tricky, as both NVKM and SVM will need access to
+         * the non-replayable fault buffer.
+         */
+        .user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
 };
 
 int
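
The new .user member ties the VOLTA_FAULT_BUFFER_A class (hence the nvif/class.h include at the top) to fault buffer 1, presumably so userspace can map the replayable buffer, while the TODO records that the non-replayable buffer stays kernel-only for now. Judging purely from the initializer's shape, it pairs an sclass triple with a buffer index; a rough, illustrative mirror of that layout (field names and the class value are stand-ins, the real definitions live in nouveau's headers):

/* Illustrative mirror of the `.user = { { 0, 0, CLASS }, 1 }` shape:
 * an sclass triple (min/max version, object class) plus the index of
 * the fault buffer that class exposes. Names here are guesses. */
#include <stdio.h>

struct sclass_model {
        int minver, maxver;
        unsigned oclass;
};

struct fault_user_model {
        struct sclass_model base;
        int buffer;                    /* which fault buffer is exposed */
};

int main(void)
{
        /* 0xc369 merely stands in for VOLTA_FAULT_BUFFER_A's value. */
        struct fault_user_model user = { { 0, 0, 0xc369 }, 1 };
        printf("class %04x -> buffer %d\n", user.base.oclass, user.buffer);
        return 0;
}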