.. | ..
28 | 28 | #include <linux/pci.h>
29 | 29 | #include <linux/pm_runtime.h>
30 | 30 | #include <linux/vga_switcheroo.h>
| 31 | +#include <linux/mmu_notifier.h>
31 | 32 |
32 | | -#include <drm/drmP.h>
33 | 33 | #include <drm/drm_crtc_helper.h>
| 34 | +#include <drm/drm_ioctl.h>
| 35 | +#include <drm/drm_vblank.h>
34 | 36 |
35 | 37 | #include <core/gpuobj.h>
36 | 38 | #include <core/option.h>
.. | ..
39 | 41 |
40 | 42 | #include <nvif/driver.h>
41 | 43 | #include <nvif/fifo.h>
| 44 | +#include <nvif/push006c.h>
42 | 45 | #include <nvif/user.h>
43 | 46 |
44 | 47 | #include <nvif/class.h>
45 | 48 | #include <nvif/cl0002.h>
46 | 49 | #include <nvif/cla06f.h>
47 | | -#include <nvif/if0004.h>
48 | 50 |
49 | 51 | #include "nouveau_drv.h"
50 | 52 | #include "nouveau_dma.h"
.. | ..
63 | 65 | #include "nouveau_usif.h"
64 | 66 | #include "nouveau_connector.h"
65 | 67 | #include "nouveau_platform.h"
| 68 | +#include "nouveau_svm.h"
| 69 | +#include "nouveau_dmem.h"
66 | 70 |
67 | 71 | MODULE_PARM_DESC(config, "option string to pass to driver core");
68 | 72 | static char *nouveau_config;
.. | ..
173 | 177 | WARN_ON(!list_empty(&cli->worker));
174 | 178 |
175 | 179 | usif_client_fini(cli);
| 180 | + nouveau_vmm_fini(&cli->svm);
176 | 181 | nouveau_vmm_fini(&cli->vmm);
177 | | - nvif_mmu_fini(&cli->mmu);
178 | | - nvif_device_fini(&cli->device);
| 182 | + nvif_mmu_dtor(&cli->mmu);
| 183 | + nvif_device_dtor(&cli->device);
179 | 184 | mutex_lock(&cli->drm->master.lock);
180 | | - nvif_client_fini(&cli->base);
| 185 | + nvif_client_dtor(&cli->base);
181 | 186 | mutex_unlock(&cli->drm->master.lock);
182 | 187 | }
183 | 188 |
.. | ..
225 | 230 | cli->name, device, &cli->base);
226 | 231 | } else {
227 | 232 | mutex_lock(&drm->master.lock);
228 | | - ret = nvif_client_init(&drm->master.base, cli->name, device,
| 233 | + ret = nvif_client_ctor(&drm->master.base, cli->name, device,
229 | 234 | &cli->base);
230 | 235 | mutex_unlock(&drm->master.lock);
231 | 236 | }
.. | ..
234 | 239 | goto done;
235 | 240 | }
236 | 241 |
237 | | - ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE,
| 242 | + ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
238 | 243 | &(struct nv_device_v0) {
239 | 244 | .device = ~0,
240 | 245 | }, sizeof(struct nv_device_v0),
.. | ..
250 | 255 | goto done;
251 | 256 | }
252 | 257 |
253 | | - ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
| 258 | + ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
| 259 | + &cli->mmu);
254 | 260 | if (ret) {
255 | 261 | NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
256 | 262 | goto done;
.. | ..
283 | 289 | }
284 | 290 |
285 | 291 | static void
286 | | -nouveau_accel_fini(struct nouveau_drm *drm)
| 292 | +nouveau_accel_ce_fini(struct nouveau_drm *drm)
| 293 | +{
| 294 | + nouveau_channel_idle(drm->cechan);
| 295 | + nvif_object_dtor(&drm->ttm.copy);
| 296 | + nouveau_channel_del(&drm->cechan);
| 297 | +}
| 298 | +
| 299 | +static void
| 300 | +nouveau_accel_ce_init(struct nouveau_drm *drm)
| 301 | +{
| 302 | + struct nvif_device *device = &drm->client.device;
| 303 | + int ret = 0;
| 304 | +
| 305 | + /* Allocate channel that has access to a (preferably async) copy
| 306 | + * engine, to use for TTM buffer moves.
| 307 | + */
| 308 | + if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
| 309 | + ret = nouveau_channel_new(drm, device,
| 310 | + nvif_fifo_runlist_ce(device), 0,
| 311 | + true, &drm->cechan);
| 312 | + } else
| 313 | + if (device->info.chipset >= 0xa3 &&
| 314 | + device->info.chipset != 0xaa &&
| 315 | + device->info.chipset != 0xac) {
| 316 | + /* Prior to Kepler, there's only a single runlist, so all
| 317 | + * engines can be accessed from any channel.
| 318 | + *
| 319 | + * We still want to use a separate channel though.
| 320 | + */
| 321 | + ret = nouveau_channel_new(drm, device, NvDmaFB, NvDmaTT, false,
| 322 | + &drm->cechan);
| 323 | + }
| 324 | +
| 325 | + if (ret)
| 326 | + NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
| 327 | +}
| 328 | +
| 329 | +static void
| 330 | +nouveau_accel_gr_fini(struct nouveau_drm *drm)
287 | 331 | {
288 | 332 | nouveau_channel_idle(drm->channel);
289 | | - nvif_object_fini(&drm->ntfy);
| 333 | + nvif_object_dtor(&drm->ntfy);
290 | 334 | nvkm_gpuobj_del(&drm->notify);
291 | | - nvif_notify_fini(&drm->flip);
292 | | - nvif_object_fini(&drm->nvsw);
293 | 335 | nouveau_channel_del(&drm->channel);
| 336 | +}
294 | 337 |
295 | | - nouveau_channel_idle(drm->cechan);
296 | | - nvif_object_fini(&drm->ttm.copy);
297 | | - nouveau_channel_del(&drm->cechan);
| 338 | +static void
| 339 | +nouveau_accel_gr_init(struct nouveau_drm *drm)
| 340 | +{
| 341 | + struct nvif_device *device = &drm->client.device;
| 342 | + u32 arg0, arg1;
| 343 | + int ret;
298 | 344 |
| 345 | + /* Allocate channel that has access to the graphics engine. */
| 346 | + if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
| 347 | + arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
| 348 | + arg1 = 1;
| 349 | + } else {
| 350 | + arg0 = NvDmaFB;
| 351 | + arg1 = NvDmaTT;
| 352 | + }
| 353 | +
| 354 | + ret = nouveau_channel_new(drm, device, arg0, arg1, false,
| 355 | + &drm->channel);
| 356 | + if (ret) {
| 357 | + NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
| 358 | + nouveau_accel_gr_fini(drm);
| 359 | + return;
| 360 | + }
| 361 | +
| 362 | + /* A SW class is used on pre-NV50 HW to assist with handling the
| 363 | + * synchronisation of page flips, as well as to implement fences
| 364 | + * on TNT/TNT2 HW that lacks any kind of support in host.
| 365 | + */
| 366 | + if (!drm->channel->nvsw.client && device->info.family < NV_DEVICE_INFO_V0_TESLA) {
| 367 | + ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
| 368 | + NVDRM_NVSW, nouveau_abi16_swclass(drm),
| 369 | + NULL, 0, &drm->channel->nvsw);
| 370 | + if (ret == 0) {
| 371 | + struct nvif_push *push = drm->channel->chan.push;
| 372 | + ret = PUSH_WAIT(push, 2);
| 373 | + if (ret == 0)
| 374 | + PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
| 375 | + }
| 376 | +
| 377 | + if (ret) {
| 378 | + NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
| 379 | + nouveau_accel_gr_fini(drm);
| 380 | + return;
| 381 | + }
| 382 | + }
| 383 | +
| 384 | + /* NvMemoryToMemoryFormat requires a notifier ctxdma for some reason,
| 385 | + * even if notification is never requested, so, allocate a ctxdma on
| 386 | + * any GPU where it's possible we'll end up using M2MF for BO moves.
| 387 | + */
| 388 | + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
| 389 | + ret = nvkm_gpuobj_new(nvxx_device(device), 32, 0, false, NULL,
| 390 | + &drm->notify);
| 391 | + if (ret) {
| 392 | + NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
| 393 | + nouveau_accel_gr_fini(drm);
| 394 | + return;
| 395 | + }
| 396 | +
| 397 | + ret = nvif_object_ctor(&drm->channel->user, "drmM2mfNtfy",
| 398 | + NvNotify0, NV_DMA_IN_MEMORY,
| 399 | + &(struct nv_dma_v0) {
| 400 | + .target = NV_DMA_V0_TARGET_VRAM,
| 401 | + .access = NV_DMA_V0_ACCESS_RDWR,
| 402 | + .start = drm->notify->addr,
| 403 | + .limit = drm->notify->addr + 31
| 404 | + }, sizeof(struct nv_dma_v0),
| 405 | + &drm->ntfy);
| 406 | + if (ret) {
| 407 | + nouveau_accel_gr_fini(drm);
| 408 | + return;
| 409 | + }
| 410 | + }
| 411 | +}
| 412 | +
| 413 | +static void
| 414 | +nouveau_accel_fini(struct nouveau_drm *drm)
| 415 | +{
| 416 | + nouveau_accel_ce_fini(drm);
| 417 | + nouveau_accel_gr_fini(drm);
299 | 418 | if (drm->fence)
300 | 419 | nouveau_fence(drm)->dtor(drm);
301 | 420 | }
.. | ..
305 | 424 | {
306 | 425 | struct nvif_device *device = &drm->client.device;
307 | 426 | struct nvif_sclass *sclass;
308 | | - u32 arg0, arg1;
309 | 427 | int ret, i, n;
310 | 428 |
311 | 429 | if (nouveau_noaccel)
312 | 430 | return;
313 | 431 |
| 432 | + /* Initialise global support for channels, and synchronisation. */
314 | 433 | ret = nouveau_channels_init(drm);
315 | 434 | if (ret)
316 | 435 | return;
317 | 436 |
318 | | - if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
319 | | - ret = nvif_user_init(device);
320 | | - if (ret)
321 | | - return;
322 | | - }
323 | | -
324 | | - /* initialise synchronisation routines */
325 | 437 | /*XXX: this is crap, but the fence/channel stuff is a little
326 | 438 | * backwards in some places. this will be fixed.
327 | 439 | */
.. | ..
353 | 465 | case MAXWELL_CHANNEL_GPFIFO_A:
354 | 466 | case PASCAL_CHANNEL_GPFIFO_A:
355 | 467 | case VOLTA_CHANNEL_GPFIFO_A:
| 468 | + case TURING_CHANNEL_GPFIFO_A:
356 | 469 | ret = nvc0_fence_create(drm);
357 | 470 | break;
358 | 471 | default:
.. | ..
367 | 480 | return;
368 | 481 | }
369 | 482 |
370 | | - if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
371 | | - ret = nouveau_channel_new(drm, &drm->client.device,
372 | | - nvif_fifo_runlist_ce(device), 0,
373 | | - &drm->cechan);
| 483 | + /* Volta requires access to a doorbell register for kickoff. */
| 484 | + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
| 485 | + ret = nvif_user_ctor(device, "drmUsermode");
374 | 486 | if (ret)
375 | | - NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
376 | | -
377 | | - arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
378 | | - arg1 = 1;
379 | | - } else
380 | | - if (device->info.chipset >= 0xa3 &&
381 | | - device->info.chipset != 0xaa &&
382 | | - device->info.chipset != 0xac) {
383 | | - ret = nouveau_channel_new(drm, &drm->client.device,
384 | | - NvDmaFB, NvDmaTT, &drm->cechan);
385 | | - if (ret)
386 | | - NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
387 | | -
388 | | - arg0 = NvDmaFB;
389 | | - arg1 = NvDmaTT;
390 | | - } else {
391 | | - arg0 = NvDmaFB;
392 | | - arg1 = NvDmaTT;
393 | | - }
394 | | -
395 | | - ret = nouveau_channel_new(drm, &drm->client.device,
396 | | - arg0, arg1, &drm->channel);
397 | | - if (ret) {
398 | | - NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
399 | | - nouveau_accel_fini(drm);
400 | | - return;
401 | | - }
402 | | -
403 | | - if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
404 | | - ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
405 | | - nouveau_abi16_swclass(drm), NULL, 0,
406 | | - &drm->nvsw);
407 | | - if (ret == 0) {
408 | | - ret = RING_SPACE(drm->channel, 2);
409 | | - if (ret == 0) {
410 | | - BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
411 | | - OUT_RING (drm->channel, drm->nvsw.handle);
412 | | - }
413 | | -
414 | | - ret = nvif_notify_init(&drm->nvsw,
415 | | - nouveau_flip_complete,
416 | | - false, NV04_NVSW_NTFY_UEVENT,
417 | | - NULL, 0, 0, &drm->flip);
418 | | - if (ret == 0)
419 | | - ret = nvif_notify_get(&drm->flip);
420 | | - if (ret) {
421 | | - nouveau_accel_fini(drm);
422 | | - return;
423 | | - }
424 | | - }
425 | | -
426 | | - if (ret) {
427 | | - NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
428 | | - nouveau_accel_fini(drm);
429 | 487 | return;
430 | | - }
431 | 488 | }
432 | 489 |
433 | | - if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
434 | | - ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0,
435 | | - false, NULL, &drm->notify);
436 | | - if (ret) {
437 | | - NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
438 | | - nouveau_accel_fini(drm);
439 | | - return;
440 | | - }
| 490 | + /* Allocate channels we need to support various functions. */
| 491 | + nouveau_accel_gr_init(drm);
| 492 | + nouveau_accel_ce_init(drm);
441 | 493 |
442 | | - ret = nvif_object_init(&drm->channel->user, NvNotify0,
443 | | - NV_DMA_IN_MEMORY,
444 | | - &(struct nv_dma_v0) {
445 | | - .target = NV_DMA_V0_TARGET_VRAM,
446 | | - .access = NV_DMA_V0_ACCESS_RDWR,
447 | | - .start = drm->notify->addr,
448 | | - .limit = drm->notify->addr + 31
449 | | - }, sizeof(struct nv_dma_v0),
450 | | - &drm->ntfy);
451 | | - if (ret) {
452 | | - nouveau_accel_fini(drm);
453 | | - return;
454 | | - }
455 | | - }
456 | | -
457 | | -
| 494 | + /* Initialise accelerated TTM buffer moves. */
458 | 495 | nouveau_bo_move_init(drm);
459 | 496 | }
460 | 497 |
461 | | -static int nouveau_drm_probe(struct pci_dev *pdev,
462 | | - const struct pci_device_id *pent)
| 498 | +static void __printf(2, 3)
| 499 | +nouveau_drm_errorf(struct nvif_object *object, const char *fmt, ...)
463 | 500 | {
464 | | - struct nvkm_device *device;
465 | | - struct apertures_struct *aper;
466 | | - bool boot = false;
467 | | - int ret;
| 501 | + struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
| 502 | + struct va_format vaf;
| 503 | + va_list va;
468 | 504 |
469 | | - if (vga_switcheroo_client_probe_defer(pdev))
470 | | - return -EPROBE_DEFER;
471 | | -
472 | | - /* We need to check that the chipset is supported before booting
473 | | - * fbdev off the hardware, as there's no way to put it back.
474 | | - */
475 | | - ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
476 | | - if (ret)
477 | | - return ret;
478 | | -
479 | | - nvkm_device_del(&device);
480 | | -
481 | | - /* Remove conflicting drivers (vesafb, efifb etc). */
482 | | - aper = alloc_apertures(3);
483 | | - if (!aper)
484 | | - return -ENOMEM;
485 | | -
486 | | - aper->ranges[0].base = pci_resource_start(pdev, 1);
487 | | - aper->ranges[0].size = pci_resource_len(pdev, 1);
488 | | - aper->count = 1;
489 | | -
490 | | - if (pci_resource_len(pdev, 2)) {
491 | | - aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
492 | | - aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
493 | | - aper->count++;
494 | | - }
495 | | -
496 | | - if (pci_resource_len(pdev, 3)) {
497 | | - aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
498 | | - aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
499 | | - aper->count++;
500 | | - }
501 | | -
502 | | -#ifdef CONFIG_X86
503 | | - boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
504 | | -#endif
505 | | - if (nouveau_modeset != 2)
506 | | - drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
507 | | - kfree(aper);
508 | | -
509 | | - ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
510 | | - true, true, ~0ULL, &device);
511 | | - if (ret)
512 | | - return ret;
513 | | -
514 | | - pci_set_master(pdev);
515 | | -
516 | | - if (nouveau_atomic)
517 | | - driver_pci.driver_features |= DRIVER_ATOMIC;
518 | | -
519 | | - ret = drm_get_pci_dev(pdev, pent, &driver_pci);
520 | | - if (ret) {
521 | | - nvkm_device_del(&device);
522 | | - return ret;
523 | | - }
524 | | -
525 | | - return 0;
| 505 | + va_start(va, fmt);
| 506 | + vaf.fmt = fmt;
| 507 | + vaf.va = &va;
| 508 | + NV_ERROR(drm, "%pV", &vaf);
| 509 | + va_end(va);
526 | 510 | }
527 | 511 |
| 512 | +static void __printf(2, 3)
| 513 | +nouveau_drm_debugf(struct nvif_object *object, const char *fmt, ...)
| 514 | +{
| 515 | + struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
| 516 | + struct va_format vaf;
| 517 | + va_list va;
| 518 | +
| 519 | + va_start(va, fmt);
| 520 | + vaf.fmt = fmt;
| 521 | + vaf.va = &va;
| 522 | + NV_DEBUG(drm, "%pV", &vaf);
| 523 | + va_end(va);
| 524 | +}
| 525 | +
| 526 | +static const struct nvif_parent_func
| 527 | +nouveau_parent = {
| 528 | + .debugf = nouveau_drm_debugf,
| 529 | + .errorf = nouveau_drm_errorf,
| 530 | +};
| 531 | +
528 | 532 | static int
529 | | -nouveau_drm_load(struct drm_device *dev, unsigned long flags)
| 533 | +nouveau_drm_device_init(struct drm_device *dev)
530 | 534 | {
531 | 535 | struct nouveau_drm *drm;
532 | 536 | int ret;
.. | ..
536 | 540 | dev->dev_private = drm;
537 | 541 | drm->dev = dev;
538 | 542 |
| 543 | + nvif_parent_ctor(&nouveau_parent, &drm->parent);
| 544 | + drm->master.base.object.parent = &drm->parent;
| 545 | +
539 | 546 | ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
540 | 547 | if (ret)
541 | | - return ret;
| 548 | + goto fail_alloc;
542 | 549 |
543 | 550 | ret = nouveau_cli_init(drm, "DRM", &drm->client);
544 | 551 | if (ret)
545 | | - return ret;
| 552 | + goto fail_master;
546 | 553 |
547 | 554 | dev->irq_enabled = true;
548 | 555 |
.. | ..
550 | 557 | nvkm_dbgopt(nouveau_debug, "DRM");
551 | 558 |
552 | 559 | INIT_LIST_HEAD(&drm->clients);
| 560 | + mutex_init(&drm->clients_lock);
553 | 561 | spin_lock_init(&drm->tile.lock);
554 | 562 |
555 | 563 | /* workaround an odd issue on nvc1 by disabling the device's
.. | ..
569 | 577 | if (ret)
570 | 578 | goto fail_bios;
571 | 579 |
| 580 | + nouveau_accel_init(drm);
| 581 | +
572 | 582 | ret = nouveau_display_create(dev);
573 | 583 | if (ret)
574 | 584 | goto fail_dispctor;
575 | 585 |
576 | 586 | if (dev->mode_config.num_crtc) {
577 | | - ret = nouveau_display_init(dev);
| 587 | + ret = nouveau_display_init(dev, false, false);
578 | 588 | if (ret)
579 | 589 | goto fail_dispinit;
580 | 590 | }
581 | 591 |
582 | 592 | nouveau_debugfs_init(drm);
583 | 593 | nouveau_hwmon_init(dev);
584 | | - nouveau_accel_init(drm);
| 594 | + nouveau_svm_init(drm);
| 595 | + nouveau_dmem_init(drm);
585 | 596 | nouveau_fbcon_init(dev);
586 | 597 | nouveau_led_init(dev);
587 | 598 |
.. | ..
599 | 610 | fail_dispinit:
600 | 611 | nouveau_display_destroy(dev);
601 | 612 | fail_dispctor:
| 613 | + nouveau_accel_fini(drm);
602 | 614 | nouveau_bios_takedown(dev);
603 | 615 | fail_bios:
604 | 616 | nouveau_ttm_fini(drm);
605 | 617 | fail_ttm:
606 | 618 | nouveau_vga_fini(drm);
607 | 619 | nouveau_cli_fini(&drm->client);
| 620 | +fail_master:
608 | 621 | nouveau_cli_fini(&drm->master);
| 622 | +fail_alloc:
| 623 | + nvif_parent_dtor(&drm->parent);
609 | 624 | kfree(drm);
610 | 625 | return ret;
611 | 626 | }
612 | 627 |
613 | 628 | static void
614 | | -nouveau_drm_unload(struct drm_device *dev)
| 629 | +nouveau_drm_device_fini(struct drm_device *dev)
615 | 630 | {
| 631 | + struct nouveau_cli *cli, *temp_cli;
616 | 632 | struct nouveau_drm *drm = nouveau_drm(dev);
617 | 633 |
618 | 634 | if (nouveau_pmops_runtime()) {
.. | ..
622 | 638 |
623 | 639 | nouveau_led_fini(dev);
624 | 640 | nouveau_fbcon_fini(dev);
625 | | - nouveau_accel_fini(drm);
| 641 | + nouveau_dmem_fini(drm);
| 642 | + nouveau_svm_fini(drm);
626 | 643 | nouveau_hwmon_fini(dev);
627 | 644 | nouveau_debugfs_fini(drm);
628 | 645 |
.. | ..
630 | 647 | nouveau_display_fini(dev, false, false);
631 | 648 | nouveau_display_destroy(dev);
632 | 649 |
| 650 | + nouveau_accel_fini(drm);
633 | 651 | nouveau_bios_takedown(dev);
634 | 652 |
635 | 653 | nouveau_ttm_fini(drm);
636 | 654 | nouveau_vga_fini(drm);
637 | 655 |
| 656 | + /*
| 657 | + * There may be existing clients from as-yet unclosed files. For now,
| 658 | + * clean them up here rather than deferring until the file is closed,
| 659 | + * but this is likely not correct if we want to support hot-unplugging
| 660 | + * properly.
| 661 | + */
| 662 | + mutex_lock(&drm->clients_lock);
| 663 | + list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
| 664 | + list_del(&cli->head);
| 665 | + mutex_lock(&cli->mutex);
| 666 | + if (cli->abi16)
| 667 | + nouveau_abi16_fini(cli->abi16);
| 668 | + mutex_unlock(&cli->mutex);
| 669 | + nouveau_cli_fini(cli);
| 670 | + kfree(cli);
| 671 | + }
| 672 | + mutex_unlock(&drm->clients_lock);
| 673 | +
638 | 674 | nouveau_cli_fini(&drm->client);
639 | 675 | nouveau_cli_fini(&drm->master);
| 676 | + nvif_parent_dtor(&drm->parent);
| 677 | + mutex_destroy(&drm->clients_lock);
640 | 678 | kfree(drm);
| 679 | +}
| 680 | +
| 681 | +/*
| 682 | + * On some Intel PCIe bridge controllers doing a
| 683 | + * D0 -> D3hot -> D3cold -> D0 sequence causes Nvidia GPUs to not reappear.
| 684 | + * Skipping the intermediate D3hot step seems to make it work again. This is
| 685 | + * probably caused by not meeting the expectation the involved AML code has
| 686 | + * when the GPU is put into D3hot state before invoking it.
| 687 | + *
| 688 | + * This leads to various manifestations of this issue:
| 689 | + * - AML code execution to power on the GPU hits an infinite loop (as the
| 690 | + * code waits on device memory to change).
| 691 | + * - kernel crashes, as all PCI reads return -1, which most code isn't able
| 692 | + * to handle well enough.
| 693 | + *
| 694 | + * In all cases dmesg will contain at least one line like this:
| 695 | + * 'nouveau 0000:01:00.0: Refused to change power state, currently in D3'
| 696 | + * followed by a lot of nouveau timeouts.
| 697 | + *
| 698 | + * Deeper down, the \_SB.PCI0.PEG0.PG00._OFF code writes bit 0x80 to the
| 699 | + * undocumented PCI config space register 0x248 of the Intel PCIe bridge
| 700 | + * controller (0x1901) in order to change the state of the PCIe link between
| 701 | + * the PCIe port and the GPU. There are alternative code paths using other
| 702 | + * registers, which seem to work fine (executed pre Windows 8):
| 703 | + * - 0xbc bit 0x20 (publicly available documentation claims 'reserved')
| 704 | + * - 0xb0 bit 0x10 (link disable)
| 705 | + * Changing the conditions inside the firmware by poking into the relevant
| 706 | + * addresses does resolve the issue, but it seemed to be ACPI private memory
| 707 | + * and not any device accessible memory at all, so there is no portable way of
| 708 | + * changing the conditions.
| 709 | + * On an XPS 9560 that means bits [0,3] on \CPEX need to be cleared.
| 710 | + *
| 711 | + * The only systems where this behavior can be seen are hybrid graphics laptops
| 712 | + * with a secondary Nvidia Maxwell, Pascal or Turing GPU. It's unclear whether
| 713 | + * this issue only occurs in combination with listed Intel PCIe bridge
| 714 | + * controllers and the mentioned GPUs or other devices as well.
| 715 | + *
| 716 | + * documentation on the PCIe bridge controller can be found in the
| 717 | + * "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2"
| 718 | + * Section "12 PCI Express* Controller (x16) Registers"
| 719 | + */
| 720 | +
| 721 | +static void quirk_broken_nv_runpm(struct pci_dev *pdev)
| 722 | +{
| 723 | + struct drm_device *dev = pci_get_drvdata(pdev);
| 724 | + struct nouveau_drm *drm = nouveau_drm(dev);
| 725 | + struct pci_dev *bridge = pci_upstream_bridge(pdev);
| 726 | +
| 727 | + if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
| 728 | + return;
| 729 | +
| 730 | + switch (bridge->device) {
| 731 | + case 0x1901:
| 732 | + drm->old_pm_cap = pdev->pm_cap;
| 733 | + pdev->pm_cap = 0;
| 734 | + NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
| 735 | + break;
| 736 | + }
| 737 | +}
| 738 | +
| 739 | +static int nouveau_drm_probe(struct pci_dev *pdev,
| 740 | + const struct pci_device_id *pent)
| 741 | +{
| 742 | + struct nvkm_device *device;
| 743 | + struct drm_device *drm_dev;
| 744 | + int ret;
| 745 | +
| 746 | + if (vga_switcheroo_client_probe_defer(pdev))
| 747 | + return -EPROBE_DEFER;
| 748 | +
| 749 | + /* We need to check that the chipset is supported before booting
| 750 | + * fbdev off the hardware, as there's no way to put it back.
| 751 | + */
| 752 | + ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
| 753 | + true, false, 0, &device);
| 754 | + if (ret)
| 755 | + return ret;
| 756 | +
| 757 | + nvkm_device_del(&device);
| 758 | +
| 759 | + /* Remove conflicting drivers (vesafb, efifb etc). */
| 760 | + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
| 761 | + if (ret)
| 762 | + return ret;
| 763 | +
| 764 | + ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
| 765 | + true, true, ~0ULL, &device);
| 766 | + if (ret)
| 767 | + return ret;
| 768 | +
| 769 | + pci_set_master(pdev);
| 770 | +
| 771 | + if (nouveau_atomic)
| 772 | + driver_pci.driver_features |= DRIVER_ATOMIC;
| 773 | +
| 774 | + drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
| 775 | + if (IS_ERR(drm_dev)) {
| 776 | + ret = PTR_ERR(drm_dev);
| 777 | + goto fail_nvkm;
| 778 | + }
| 779 | +
| 780 | + ret = pci_enable_device(pdev);
| 781 | + if (ret)
| 782 | + goto fail_drm;
| 783 | +
| 784 | + drm_dev->pdev = pdev;
| 785 | + pci_set_drvdata(pdev, drm_dev);
| 786 | +
| 787 | + ret = nouveau_drm_device_init(drm_dev);
| 788 | + if (ret)
| 789 | + goto fail_pci;
| 790 | +
| 791 | + ret = drm_dev_register(drm_dev, pent->driver_data);
| 792 | + if (ret)
| 793 | + goto fail_drm_dev_init;
| 794 | +
| 795 | + quirk_broken_nv_runpm(pdev);
| 796 | + return 0;
| 797 | +
| 798 | +fail_drm_dev_init:
| 799 | + nouveau_drm_device_fini(drm_dev);
| 800 | +fail_pci:
| 801 | + pci_disable_device(pdev);
| 802 | +fail_drm:
| 803 | + drm_dev_put(drm_dev);
| 804 | +fail_nvkm:
| 805 | + nvkm_device_del(&device);
| 806 | + return ret;
641 | 807 | }
642 | 808 |
643 | 809 | void
.. | ..
647 | 813 | struct nvkm_client *client;
648 | 814 | struct nvkm_device *device;
649 | 815 |
| 816 | + drm_dev_unplug(dev);
| 817 | +
650 | 818 | dev->irq_enabled = false;
651 | 819 | client = nvxx_client(&drm->client.base);
652 | 820 | device = nvkm_device_find(client->device);
653 | | - drm_put_dev(dev);
654 | 821 |
| 822 | + nouveau_drm_device_fini(dev);
| 823 | + drm_dev_put(dev);
655 | 824 | nvkm_device_del(&device);
656 | 825 | }
657 | 826 |
.. | ..
659 | 828 | nouveau_drm_remove(struct pci_dev *pdev)
660 | 829 | {
661 | 830 | struct drm_device *dev = pci_get_drvdata(pdev);
| 831 | + struct nouveau_drm *drm = nouveau_drm(dev);
662 | 832 |
| 833 | + /* revert our workaround */
| 834 | + if (drm->old_pm_cap)
| 835 | + pdev->pm_cap = drm->old_pm_cap;
663 | 836 | nouveau_drm_device_remove(dev);
| 837 | + pci_disable_device(pdev);
664 | 838 | }
665 | 839 |
666 | 840 | static int
.. | ..
669 | 843 | struct nouveau_drm *drm = nouveau_drm(dev);
670 | 844 | int ret;
671 | 845 |
| 846 | + nouveau_svm_suspend(drm);
| 847 | + nouveau_dmem_suspend(drm);
672 | 848 | nouveau_led_suspend(dev);
673 | 849 |
674 | 850 | if (dev->mode_config.num_crtc) {
.. | ..
726 | 902 | static int
727 | 903 | nouveau_do_resume(struct drm_device *dev, bool runtime)
728 | 904 | {
| 905 | + int ret = 0;
729 | 906 | struct nouveau_drm *drm = nouveau_drm(dev);
730 | 907 |
731 | 908 | NV_DEBUG(drm, "resuming object tree...\n");
732 | | - nvif_client_resume(&drm->master.base);
| 909 | + ret = nvif_client_resume(&drm->master.base);
| 910 | + if (ret) {
| 911 | + NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
| 912 | + return ret;
| 913 | + }
733 | 914 |
734 | 915 | NV_DEBUG(drm, "resuming fence...\n");
735 | 916 | if (drm->fence && nouveau_fence(drm)->resume)
.. | ..
745 | 926 | }
746 | 927 |
747 | 928 | nouveau_led_resume(dev);
748 | | -
| 929 | + nouveau_dmem_resume(drm);
| 930 | + nouveau_svm_resume(drm);
749 | 931 | return 0;
750 | 932 | }
751 | 933 |
.. | ..
792 | 974 | ret = nouveau_do_resume(drm_dev, false);
793 | 975 |
794 | 976 | /* Monitors may have been connected / disconnected during suspend */
795 | | - schedule_work(&nouveau_drm(drm_dev)->hpd_work);
| 977 | + nouveau_display_hpd_resume(drm_dev);
796 | 978 |
797 | 979 | return ret;
798 | 980 | }
.. | ..
848 | 1030 | {
849 | 1031 | struct pci_dev *pdev = to_pci_dev(dev);
850 | 1032 | struct drm_device *drm_dev = pci_get_drvdata(pdev);
| 1033 | + struct nouveau_drm *drm = nouveau_drm(drm_dev);
851 | 1034 | struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
852 | 1035 | int ret;
853 | 1036 |
.. | ..
864 | 1047 | pci_set_master(pdev);
865 | 1048 |
866 | 1049 | ret = nouveau_do_resume(drm_dev, true);
| 1050 | + if (ret) {
| 1051 | + NV_ERROR(drm, "resume failed with: %d\n", ret);
| 1052 | + return ret;
| 1053 | + }
867 | 1054 |
868 | 1055 | /* do magic */
869 | 1056 | nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
870 | 1057 | drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
871 | 1058 |
872 | 1059 | /* Monitors may have been connected / disconnected during suspend */
873 | | - schedule_work(&nouveau_drm(drm_dev)->hpd_work);
| 1060 | + nouveau_display_hpd_resume(drm_dev);
874 | 1061 |
875 | 1062 | return ret;
876 | 1063 | }
.. | ..
920 | 1107 |
921 | 1108 | fpriv->driver_priv = cli;
922 | 1109 |
923 | | - mutex_lock(&drm->client.mutex);
| 1110 | + mutex_lock(&drm->clients_lock);
924 | 1111 | list_add(&cli->head, &drm->clients);
925 | | - mutex_unlock(&drm->client.mutex);
| 1112 | + mutex_unlock(&drm->clients_lock);
926 | 1113 |
927 | 1114 | done:
928 | 1115 | if (ret && cli) {
.. | ..
940 | 1127 | {
941 | 1128 | struct nouveau_cli *cli = nouveau_cli(fpriv);
942 | 1129 | struct nouveau_drm *drm = nouveau_drm(dev);
| 1130 | + int dev_index;
| 1131 | +
| 1132 | + /*
| 1133 | + * The device is gone, and as it currently stands all clients are
| 1134 | + * cleaned up in the removal codepath. In the future this may change
| 1135 | + * so that we can support hot-unplugging, but for now we immediately
| 1136 | + * return to avoid a double-free situation.
| 1137 | + */
| 1138 | + if (!drm_dev_enter(dev, &dev_index))
| 1139 | + return;
943 | 1140 |
944 | 1141 | pm_runtime_get_sync(dev->dev);
945 | 1142 |
.. | ..
948 | 1145 | nouveau_abi16_fini(cli->abi16);
949 | 1146 | mutex_unlock(&cli->mutex);
950 | 1147 |
951 | | - mutex_lock(&drm->client.mutex);
| 1148 | + mutex_lock(&drm->clients_lock);
952 | 1149 | list_del(&cli->head);
953 | | - mutex_unlock(&drm->client.mutex);
| 1150 | + mutex_unlock(&drm->clients_lock);
954 | 1151 |
955 | 1152 | nouveau_cli_fini(cli);
956 | 1153 | kfree(cli);
957 | 1154 | pm_runtime_mark_last_busy(dev->dev);
958 | 1155 | pm_runtime_put_autosuspend(dev->dev);
| 1156 | + drm_dev_exit(dev_index);
959 | 1157 | }
960 | 1158 |
961 | 1159 | static const struct drm_ioctl_desc
962 | 1160 | nouveau_ioctls[] = {
963 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
964 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
965 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
966 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_AUTH|DRM_RENDER_ALLOW),
967 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
968 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
969 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
970 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
971 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
972 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
973 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
974 | | - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
| 1161 | + DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_RENDER_ALLOW),
| 1162 | + DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
| 1163 | + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_RENDER_ALLOW),
| 1164 | + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_RENDER_ALLOW),
| 1165 | + DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_RENDER_ALLOW),
| 1166 | + DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_RENDER_ALLOW),
| 1167 | + DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_RENDER_ALLOW),
| 1168 | + DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_RENDER_ALLOW),
| 1169 | + DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_RENDER_ALLOW),
| 1170 | + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_RENDER_ALLOW),
| 1171 | + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_RENDER_ALLOW),
| 1172 | + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
| 1173 | + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
| 1174 | + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
975 | 1175 | };
976 | 1176 |
977 | 1177 | long
.. | ..
1019 | 1219 | static struct drm_driver
1020 | 1220 | driver_stub = {
1021 | 1221 | .driver_features =
1022 | | - DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
| 1222 | + DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER
1023 | 1223 | #if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
1024 | 1224 | | DRIVER_KMS_LEGACY_CONTEXT
1025 | 1225 | #endif
1026 | 1226 | ,
1027 | 1227 |
1028 | | - .load = nouveau_drm_load,
1029 | | - .unload = nouveau_drm_unload,
1030 | 1228 | .open = nouveau_drm_open,
1031 | 1229 | .postclose = nouveau_drm_postclose,
1032 | 1230 | .lastclose = nouveau_vga_lastclose,
.. | ..
1035 | 1233 | .debugfs_init = nouveau_drm_debugfs_init,
1036 | 1234 | #endif
1037 | 1235 |
1038 | | - .enable_vblank = nouveau_display_vblank_enable,
1039 | | - .disable_vblank = nouveau_display_vblank_disable,
1040 | | - .get_scanout_position = nouveau_display_scanoutpos,
1041 | | - .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
1042 | | -
1043 | 1236 | .ioctls = nouveau_ioctls,
1044 | 1237 | .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
1045 | 1238 | .fops = &nouveau_driver_fops,
1046 | 1239 |
1047 | 1240 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1048 | 1241 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1049 | | - .gem_prime_export = drm_gem_prime_export,
1050 | | - .gem_prime_import = drm_gem_prime_import,
1051 | 1242 | .gem_prime_pin = nouveau_gem_prime_pin,
1052 | | - .gem_prime_res_obj = nouveau_gem_prime_res_obj,
1053 | 1243 | .gem_prime_unpin = nouveau_gem_prime_unpin,
1054 | 1244 | .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
1055 | 1245 | .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
.. | ..
1147 | 1337 | goto err_free;
1148 | 1338 | }
1149 | 1339 |
| 1340 | + err = nouveau_drm_device_init(drm);
| 1341 | + if (err)
| 1342 | + goto err_put;
| 1343 | +
1150 | 1344 | platform_set_drvdata(pdev, drm);
1151 | 1345 |
1152 | 1346 | return drm;
1153 | 1347 |
| 1348 | +err_put:
| 1349 | + drm_dev_put(drm);
1154 | 1350 | err_free:
1155 | 1351 | nvkm_device_del(pdevice);
1156 | 1352 |
.. | ..
1202 | 1398 | #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
1203 | 1399 | platform_driver_unregister(&nouveau_platform_driver);
1204 | 1400 | #endif
| 1401 | + if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
| 1402 | + mmu_notifier_synchronize();
1205 | 1403 | }
1206 | 1404 |
1207 | 1405 | module_init(nouveau_drm_init);
---|