| .. | .. |
|---|
| 149 | 149 | } |
|---|
| 150 | 150 | |
|---|
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
			  struct nvkm_memory *mem, int nr)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int target;

	/* Map the runlist backing memory's location onto the aperture
	 * field expected by the runlist base-address register.  Only
	 * VRAM and non-coherent sysmem are valid homes for a runlist.
	 */
	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	/* Program the runlist base (4KiB-aligned address | aperture in
	 * bits 31:28), then submit: runlist index in bits 31:20 plus the
	 * entry count.  NOTE(review): writing 0x002274 appears to be
	 * what kicks off the HW fetch — confirm against HW docs.
	 */
	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	/* Poll (up to 2ms) for this runlist's pending bit to clear;
	 * log but otherwise ignore a timeout.
	 */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
			break;
	) < 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
}
|---|
| 177 | + |
|---|
| 178 | +void |
|---|
| 179 | +gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl) |
|---|
| 153 | 180 | { |
|---|
| 154 | 181 | const struct gk104_fifo_runlist_func *func = fifo->func->runlist; |
|---|
| 155 | 182 | struct gk104_fifo_chan *chan; |
|---|
| 156 | 183 | struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
|---|
| 157 | | - struct nvkm_device *device = subdev->device; |
|---|
| 158 | 184 | struct nvkm_memory *mem; |
|---|
| 159 | 185 | struct nvkm_fifo_cgrp *cgrp; |
|---|
| 160 | 186 | int nr = 0; |
|---|
| 161 | | - int target; |
|---|
| 162 | 187 | |
|---|
| 163 | 188 | mutex_lock(&subdev->mutex); |
|---|
| 164 | 189 | mem = fifo->runlist[runl].mem[fifo->runlist[runl].next]; |
|---|
| .. | .. |
|---|
| 177 | 202 | } |
|---|
| 178 | 203 | nvkm_done(mem); |
|---|
| 179 | 204 | |
|---|
| 180 | | - switch (nvkm_memory_target(mem)) { |
|---|
| 181 | | - case NVKM_MEM_TARGET_VRAM: target = 0; break; |
|---|
| 182 | | - case NVKM_MEM_TARGET_NCOH: target = 3; break; |
|---|
| 183 | | - default: |
|---|
| 184 | | - WARN_ON(1); |
|---|
| 185 | | - goto unlock; |
|---|
| 186 | | - } |
|---|
| 187 | | - |
|---|
| 188 | | - nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | |
|---|
| 189 | | - (target << 28)); |
|---|
| 190 | | - nvkm_wr32(device, 0x002274, (runl << 20) | nr); |
|---|
| 191 | | - |
|---|
| 192 | | - if (nvkm_msec(device, 2000, |
|---|
| 193 | | - if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000)) |
|---|
| 194 | | - break; |
|---|
| 195 | | - ) < 0) |
|---|
| 196 | | - nvkm_error(subdev, "runlist %d update timeout\n", runl); |
|---|
| 197 | | -unlock: |
|---|
| 205 | + func->commit(fifo, runl, mem, nr); |
|---|
| 198 | 206 | mutex_unlock(&subdev->mutex); |
|---|
| 199 | 207 | } |
|---|
| 200 | 208 | |
|---|
| .. | .. |
|---|
| 238 | 246 | gk104_fifo_runlist = { |
|---|
| 239 | 247 | .size = 8, |
|---|
| 240 | 248 | .chan = gk104_fifo_runlist_chan, |
|---|
| 249 | + .commit = gk104_fifo_runlist_commit, |
|---|
| 250 | +}; |
|---|
| 251 | + |
|---|
| 252 | +void |
|---|
| 253 | +gk104_fifo_pbdma_init(struct gk104_fifo *fifo) |
|---|
| 254 | +{ |
|---|
| 255 | + struct nvkm_device *device = fifo->base.engine.subdev.device; |
|---|
| 256 | + nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1); |
|---|
| 257 | +} |
|---|
| 258 | + |
|---|
| 259 | +int |
|---|
| 260 | +gk104_fifo_pbdma_nr(struct gk104_fifo *fifo) |
|---|
| 261 | +{ |
|---|
| 262 | + struct nvkm_device *device = fifo->base.engine.subdev.device; |
|---|
| 263 | + /* Determine number of PBDMAs by checking valid enable bits. */ |
|---|
| 264 | + nvkm_wr32(device, 0x000204, 0xffffffff); |
|---|
| 265 | + return hweight32(nvkm_rd32(device, 0x000204)); |
|---|
| 266 | +} |
|---|
| 267 | + |
|---|
/* PBDMA enumeration/enable hooks shared by GK104-generation FIFO
 * implementations (wired into gk104_fifo_func.pbdma).
 */
const struct gk104_fifo_pbdma_func
gk104_fifo_pbdma = {
	.nr = gk104_fifo_pbdma_nr,
	.init = gk104_fifo_pbdma_init,
};
|---|
| 242 | 273 | |
|---|
| 243 | 274 | static void |
|---|
| .. | .. |
|---|
| 267 | 298 | } |
|---|
| 268 | 299 | |
|---|
| 269 | 300 | for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl)) |
|---|
| 270 | | - gk104_fifo_runlist_commit(fifo, runl); |
|---|
| 301 | + gk104_fifo_runlist_update(fifo, runl); |
|---|
| 271 | 302 | |
|---|
| 272 | 303 | nvkm_wr32(device, 0x00262c, runm); |
|---|
| 273 | 304 | nvkm_mask(device, 0x002630, runm, 0x00000000); |
|---|
| .. | .. |
|---|
| 456 | 487 | if (ee && ee->data2) { |
|---|
| 457 | 488 | switch (ee->data2) { |
|---|
| 458 | 489 | case NVKM_SUBDEV_BAR: |
|---|
| 459 | | - nvkm_mask(device, 0x001704, 0x00000000, 0x00000000); |
|---|
| 490 | + nvkm_bar_bar1_reset(device); |
|---|
| 460 | 491 | break; |
|---|
| 461 | 492 | case NVKM_SUBDEV_INSTMEM: |
|---|
| 462 | | - nvkm_mask(device, 0x001714, 0x00000000, 0x00000000); |
|---|
| 493 | + nvkm_bar_bar2_reset(device); |
|---|
| 463 | 494 | break; |
|---|
| 464 | 495 | case NVKM_ENGINE_IFB: |
|---|
| 465 | 496 | nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); |
|---|
| .. | .. |
|---|
| 613 | 644 | struct nvkm_device *device = subdev->device; |
|---|
| 614 | 645 | u32 stat = nvkm_rd32(device, 0x00259c); |
|---|
| 615 | 646 | nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); |
|---|
| 616 | | -} |
|---|
| 617 | | - |
|---|
| 618 | | -static void |
|---|
| 619 | | -gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) |
|---|
| 620 | | -{ |
|---|
| 621 | | - struct nvkm_subdev *subdev = &fifo->base.engine.subdev; |
|---|
| 622 | | - struct nvkm_device *device = subdev->device; |
|---|
| 623 | | - u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); |
|---|
| 624 | | - u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); |
|---|
| 625 | | - u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); |
|---|
| 626 | | - u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10)); |
|---|
| 627 | | - struct nvkm_fault_data info; |
|---|
| 628 | | - |
|---|
| 629 | | - info.inst = (u64)inst << 12; |
|---|
| 630 | | - info.addr = ((u64)vahi << 32) | valo; |
|---|
| 631 | | - info.time = 0; |
|---|
| 632 | | - info.engine = unit; |
|---|
| 633 | | - info.valid = 1; |
|---|
| 634 | | - info.gpc = (type & 0x1f000000) >> 24; |
|---|
| 635 | | - info.client = (type & 0x00001f00) >> 8; |
|---|
| 636 | | - info.access = (type & 0x00000080) >> 7; |
|---|
| 637 | | - info.hub = (type & 0x00000040) >> 6; |
|---|
| 638 | | - info.reason = (type & 0x000000ff); |
|---|
| 639 | | - |
|---|
| 640 | | - nvkm_fifo_fault(&fifo->base, &info); |
|---|
| 641 | 647 | } |
|---|
| 642 | 648 | |
|---|
| 643 | 649 | static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { |
|---|
| .. | .. |
|---|
| 818 | 824 | u32 mask = nvkm_rd32(device, 0x00259c); |
|---|
| 819 | 825 | while (mask) { |
|---|
| 820 | 826 | u32 unit = __ffs(mask); |
|---|
| 821 | | - gk104_fifo_intr_fault(fifo, unit); |
|---|
| 827 | + fifo->func->intr.fault(&fifo->base, unit); |
|---|
| 822 | 828 | nvkm_wr32(device, 0x00259c, (1 << unit)); |
|---|
| 823 | 829 | mask &= ~(1 << unit); |
|---|
| 824 | 830 | } |
|---|
| .. | .. |
|---|
| 904 | 910 | enum nvkm_devidx engidx; |
|---|
| 905 | 911 | u32 *map; |
|---|
| 906 | 912 | |
|---|
| 907 | | - /* Determine number of PBDMAs by checking valid enable bits. */ |
|---|
| 908 | | - nvkm_wr32(device, 0x000204, 0xffffffff); |
|---|
| 909 | | - fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204)); |
|---|
| 913 | + fifo->pbdma_nr = fifo->func->pbdma->nr(fifo); |
|---|
| 910 | 914 | nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr); |
|---|
| 911 | 915 | |
|---|
| 912 | 916 | /* Read PBDMA->runlist(s) mapping from HW. */ |
|---|
| .. | .. |
|---|
| 978 | 982 | int i; |
|---|
| 979 | 983 | |
|---|
| 980 | 984 | /* Enable PBDMAs. */ |
|---|
| 981 | | - nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1); |
|---|
| 985 | + fifo->func->pbdma->init(fifo); |
|---|
| 982 | 986 | |
|---|
| 983 | 987 | /* PBDMA[n] */ |
|---|
| 984 | 988 | for (i = 0; i < fifo->pbdma_nr; i++) { |
|---|
| .. | .. |
|---|
| 995 | 999 | |
|---|
| 996 | 1000 | nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12); |
|---|
| 997 | 1001 | |
|---|
| 998 | | - if (fifo->func->init_pbdma_timeout) |
|---|
| 999 | | - fifo->func->init_pbdma_timeout(fifo); |
|---|
| 1002 | + if (fifo->func->pbdma->init_timeout) |
|---|
| 1003 | + fifo->func->pbdma->init_timeout(fifo); |
|---|
| 1000 | 1004 | |
|---|
| 1001 | 1005 | nvkm_wr32(device, 0x002100, 0xffffffff); |
|---|
| 1002 | 1006 | nvkm_wr32(device, 0x002140, 0x7fffffff); |
|---|
| .. | .. |
|---|
| 1175 | 1179 | |
|---|
| 1176 | 1180 | static const struct gk104_fifo_func |
|---|
| 1177 | 1181 | gk104_fifo = { |
|---|
| 1182 | + .intr.fault = gf100_fifo_intr_fault, |
|---|
| 1183 | + .pbdma = &gk104_fifo_pbdma, |
|---|
| 1178 | 1184 | .fault.access = gk104_fifo_fault_access, |
|---|
| 1179 | 1185 | .fault.engine = gk104_fifo_fault_engine, |
|---|
| 1180 | 1186 | .fault.reason = gk104_fifo_fault_reason, |
|---|