.. | .. |
---|
31 | 31 | * OTHER DEALINGS IN THE SOFTWARE. |
---|
32 | 32 | */ |
---|
33 | 33 | |
---|
| 34 | +#include <linux/anon_inodes.h> |
---|
| 35 | +#include <linux/dma-fence.h> |
---|
| 36 | +#include <linux/file.h> |
---|
| 37 | +#include <linux/module.h> |
---|
| 38 | +#include <linux/pci.h> |
---|
34 | 39 | #include <linux/poll.h> |
---|
35 | 40 | #include <linux/slab.h> |
---|
36 | | -#include <linux/module.h> |
---|
37 | 41 | |
---|
38 | 42 | #include <drm/drm_client.h> |
---|
| 43 | +#include <drm/drm_drv.h> |
---|
39 | 44 | #include <drm/drm_file.h> |
---|
40 | | -#include <drm/drmP.h> |
---|
| 45 | +#include <drm/drm_print.h> |
---|
41 | 46 | |
---|
42 | | -#include "drm_legacy.h" |
---|
43 | | -#include "drm_internal.h" |
---|
44 | 47 | #include "drm_crtc_internal.h" |
---|
| 48 | +#include "drm_internal.h" |
---|
| 49 | +#include "drm_legacy.h" |
---|
| 50 | + |
---|
| 51 | +#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE) |
---|
| 52 | +#include <uapi/asm/mman.h> |
---|
| 53 | +#include <drm/drm_vma_manager.h> |
---|
| 54 | +#endif |
---|
45 | 55 | |
---|
46 | 56 | /* from BKL pushdown */ |
---|
47 | 57 | DEFINE_MUTEX(drm_global_mutex); |
---|
48 | 58 | |
---|
49 | | -#define MAX_DRM_OPEN_COUNT 128 |
---|
| 59 | +bool drm_dev_needs_global_mutex(struct drm_device *dev) |
---|
| 60 | +{ |
---|
| 61 | + /* |
---|
| 62 | + * Legacy drivers rely on all kinds of BKL locking semantics, don't |
---|
| 63 | + * bother. They also still need BKL locking for their ioctls, so better |
---|
| 64 | + * safe than sorry. |
---|
| 65 | + */ |
---|
| 66 | + if (drm_core_check_feature(dev, DRIVER_LEGACY)) |
---|
| 67 | + return true; |
---|
| 68 | + |
---|
| 69 | + /* |
---|
| 70 | + * The deprecated ->load callback must be called after the driver is |
---|
| 71 | + * already registered. This means such drivers rely on the BKL to make |
---|
| 72 | + * sure an open can't proceed until the driver is actually fully set up. |
---|
| 73 | + * Similar hilarity holds for the unload callback. |
---|
| 74 | + */ |
---|
| 75 | + if (dev->driver->load || dev->driver->unload) |
---|
| 76 | + return true; |
---|
| 77 | + |
---|
| 78 | + /* |
---|
| 79 | + * Drivers with the lastclose callback assume that it's synchronized |
---|
| 80 | + * against concurrent opens, which again needs the BKL. The proper fix |
---|
| 81 | + * is to use the drm_client infrastructure with proper locking for each |
---|
| 82 | + * client. |
---|
| 83 | + */ |
---|
| 84 | + if (dev->driver->lastclose) |
---|
| 85 | + return true; |
---|
| 86 | + |
---|
| 87 | + return false; |
---|
| 88 | +} |
---|
50 | 89 | |
---|
51 | 90 | /** |
---|
52 | 91 | * DOC: file operations |
---|
.. | .. |
---|
102 | 141 | * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`. |
---|
103 | 142 | */ |
---|
104 | 143 | |
---|
105 | | -static int drm_open_helper(struct file *filp, struct drm_minor *minor); |
---|
106 | | - |
---|
107 | 144 | /** |
---|
108 | 145 | * drm_file_alloc - allocate file context |
---|
109 | 146 | * @minor: minor to allocate on |
---|
.. | .. |
---|
130 | 167 | |
---|
131 | 168 | /* for compatibility root is always authenticated */ |
---|
132 | 169 | file->authenticated = capable(CAP_SYS_ADMIN); |
---|
133 | | - file->lock_count = 0; |
---|
134 | 170 | |
---|
135 | 171 | INIT_LIST_HEAD(&file->lhead); |
---|
136 | 172 | INIT_LIST_HEAD(&file->fbs); |
---|
.. | .. |
---|
149 | 185 | if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) |
---|
150 | 186 | drm_syncobj_open(file); |
---|
151 | 187 | |
---|
152 | | - if (drm_core_check_feature(dev, DRIVER_PRIME)) |
---|
153 | | - drm_prime_init_file_private(&file->prime); |
---|
| 188 | + drm_prime_init_file_private(&file->prime); |
---|
154 | 189 | |
---|
155 | 190 | if (dev->driver->open) { |
---|
156 | 191 | ret = dev->driver->open(dev, file); |
---|
.. | .. |
---|
161 | 196 | return file; |
---|
162 | 197 | |
---|
163 | 198 | out_prime_destroy: |
---|
164 | | - if (drm_core_check_feature(dev, DRIVER_PRIME)) |
---|
165 | | - drm_prime_destroy_file_private(&file->prime); |
---|
| 199 | + drm_prime_destroy_file_private(&file->prime); |
---|
166 | 200 | if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) |
---|
167 | 201 | drm_syncobj_release(file); |
---|
168 | 202 | if (drm_core_check_feature(dev, DRIVER_GEM)) |
---|
.. | .. |
---|
219 | 253 | |
---|
220 | 254 | dev = file->minor->dev; |
---|
221 | 255 | |
---|
222 | | - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", |
---|
223 | | - task_pid_nr(current), |
---|
| 256 | + DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n", |
---|
| 257 | + current->comm, task_pid_nr(current), |
---|
224 | 258 | (long)old_encode_dev(file->minor->kdev->devt), |
---|
225 | | - dev->open_count); |
---|
| 259 | + atomic_read(&dev->open_count)); |
---|
226 | 260 | |
---|
227 | 261 | if (drm_core_check_feature(dev, DRIVER_LEGACY) && |
---|
228 | 262 | dev->driver->preclose) |
---|
.. | .. |
---|
255 | 289 | if (dev->driver->postclose) |
---|
256 | 290 | dev->driver->postclose(dev, file); |
---|
257 | 291 | |
---|
258 | | - if (drm_core_check_feature(dev, DRIVER_PRIME)) |
---|
259 | | - drm_prime_destroy_file_private(&file->prime); |
---|
| 292 | + drm_prime_destroy_file_private(&file->prime); |
---|
260 | 293 | |
---|
261 | 294 | WARN_ON(!list_empty(&file->event_list)); |
---|
262 | 295 | |
---|
.. | .. |
---|
276 | 309 | drm_file_free(file_priv); |
---|
277 | 310 | } |
---|
278 | 311 | |
---|
279 | | -static int drm_setup(struct drm_device * dev) |
---|
| 312 | +/* |
---|
| 313 | + * Check whether DRI will run on this CPU. |
---|
| 314 | + * |
---|
| 315 | + * \return non-zero if the DRI will run on this CPU, or zero otherwise. |
---|
| 316 | + */ |
---|
| 317 | +static int drm_cpu_valid(void) |
---|
280 | 318 | { |
---|
| 319 | +#if defined(__sparc__) && !defined(__sparc_v9__) |
---|
| 320 | + return 0; /* No cmpxchg before v9 sparc. */ |
---|
| 321 | +#endif |
---|
| 322 | + return 1; |
---|
| 323 | +} |
---|
| 324 | + |
---|
| 325 | +/* |
---|
| 326 | + * Called whenever a process opens a drm node |
---|
| 327 | + * |
---|
| 328 | + * \param filp file pointer. |
---|
| 329 | + * \param minor acquired minor-object. |
---|
| 330 | + * \return zero on success or a negative number on failure. |
---|
| 331 | + * |
---|
| 332 | + * Creates and initializes a drm_file structure for the file private data in \p |
---|
| 333 | + * filp and adds it into the doubly-linked list in \p dev. |
---|
| 334 | + */ |
---|
| 335 | +static int drm_open_helper(struct file *filp, struct drm_minor *minor) |
---|
| 336 | +{ |
---|
| 337 | + struct drm_device *dev = minor->dev; |
---|
| 338 | + struct drm_file *priv; |
---|
281 | 339 | int ret; |
---|
282 | 340 | |
---|
283 | | - if (dev->driver->firstopen && |
---|
284 | | - drm_core_check_feature(dev, DRIVER_LEGACY)) { |
---|
285 | | - ret = dev->driver->firstopen(dev); |
---|
286 | | - if (ret != 0) |
---|
| 341 | + if (filp->f_flags & O_EXCL) |
---|
| 342 | + return -EBUSY; /* No exclusive opens */ |
---|
| 343 | + if (!drm_cpu_valid()) |
---|
| 344 | + return -EINVAL; |
---|
| 345 | + if (dev->switch_power_state != DRM_SWITCH_POWER_ON && |
---|
| 346 | + dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF) |
---|
| 347 | + return -EINVAL; |
---|
| 348 | + |
---|
| 349 | + DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm, |
---|
| 350 | + task_pid_nr(current), minor->index); |
---|
| 351 | + |
---|
| 352 | + priv = drm_file_alloc(minor); |
---|
| 353 | + if (IS_ERR(priv)) |
---|
| 354 | + return PTR_ERR(priv); |
---|
| 355 | + |
---|
| 356 | + if (drm_is_primary_client(priv)) { |
---|
| 357 | + ret = drm_master_open(priv); |
---|
| 358 | + if (ret) { |
---|
| 359 | + drm_file_free(priv); |
---|
287 | 360 | return ret; |
---|
| 361 | + } |
---|
288 | 362 | } |
---|
289 | 363 | |
---|
290 | | - ret = drm_legacy_dma_setup(dev); |
---|
291 | | - if (ret < 0) |
---|
292 | | - return ret; |
---|
| 364 | + filp->private_data = priv; |
---|
| 365 | + filp->f_mode |= FMODE_UNSIGNED_OFFSET; |
---|
| 366 | + priv->filp = filp; |
---|
293 | 367 | |
---|
| 368 | + mutex_lock(&dev->filelist_mutex); |
---|
| 369 | + list_add(&priv->lhead, &dev->filelist); |
---|
| 370 | + mutex_unlock(&dev->filelist_mutex); |
---|
294 | 371 | |
---|
295 | | - DRM_DEBUG("\n"); |
---|
| 372 | +#ifdef __alpha__ |
---|
| 373 | + /* |
---|
| 374 | + * Default the hose |
---|
| 375 | + */ |
---|
| 376 | + if (!dev->hose) { |
---|
| 377 | + struct pci_dev *pci_dev; |
---|
| 378 | + |
---|
| 379 | + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); |
---|
| 380 | + if (pci_dev) { |
---|
| 381 | + dev->hose = pci_dev->sysdata; |
---|
| 382 | + pci_dev_put(pci_dev); |
---|
| 383 | + } |
---|
| 384 | + if (!dev->hose) { |
---|
| 385 | + struct pci_bus *b = list_entry(pci_root_buses.next, |
---|
| 386 | + struct pci_bus, node); |
---|
| 387 | + if (b) |
---|
| 388 | + dev->hose = b->sysdata; |
---|
| 389 | + } |
---|
| 390 | + } |
---|
| 391 | +#endif |
---|
| 392 | + |
---|
296 | 393 | return 0; |
---|
297 | 394 | } |
---|
298 | 395 | |
---|
.. | .. |
---|
321 | 418 | return PTR_ERR(minor); |
---|
322 | 419 | |
---|
323 | 420 | dev = minor->dev; |
---|
324 | | - if (!dev->open_count++) |
---|
325 | | - need_setup = 1; |
---|
| 421 | + if (drm_dev_needs_global_mutex(dev)) |
---|
| 422 | + mutex_lock(&drm_global_mutex); |
---|
326 | 423 | |
---|
327 | | - if (dev->open_count >= MAX_DRM_OPEN_COUNT) { |
---|
328 | | - retcode = -EPERM; |
---|
329 | | - goto err_undo; |
---|
330 | | - } |
---|
| 424 | + if (!atomic_fetch_inc(&dev->open_count)) |
---|
| 425 | + need_setup = 1; |
---|
331 | 426 | |
---|
332 | 427 | /* share address_space across all char-devs of a single device */ |
---|
333 | 428 | filp->f_mapping = dev->anon_inode->i_mapping; |
---|
.. | .. |
---|
336 | 431 | if (retcode) |
---|
337 | 432 | goto err_undo; |
---|
338 | 433 | if (need_setup) { |
---|
339 | | - retcode = drm_setup(dev); |
---|
| 434 | + retcode = drm_legacy_setup(dev); |
---|
340 | 435 | if (retcode) { |
---|
341 | 436 | drm_close_helper(filp); |
---|
342 | 437 | goto err_undo; |
---|
343 | 438 | } |
---|
344 | 439 | } |
---|
| 440 | + |
---|
| 441 | + if (drm_dev_needs_global_mutex(dev)) |
---|
| 442 | + mutex_unlock(&drm_global_mutex); |
---|
| 443 | + |
---|
345 | 444 | return 0; |
---|
346 | 445 | |
---|
347 | 446 | err_undo: |
---|
348 | | - dev->open_count--; |
---|
| 447 | + atomic_dec(&dev->open_count); |
---|
| 448 | + if (drm_dev_needs_global_mutex(dev)) |
---|
| 449 | + mutex_unlock(&drm_global_mutex); |
---|
349 | 450 | drm_minor_release(minor); |
---|
350 | 451 | return retcode; |
---|
351 | 452 | } |
---|
352 | 453 | EXPORT_SYMBOL(drm_open); |
---|
353 | | - |
---|
354 | | -/* |
---|
355 | | - * Check whether DRI will run on this CPU. |
---|
356 | | - * |
---|
357 | | - * \return non-zero if the DRI will run on this CPU, or zero otherwise. |
---|
358 | | - */ |
---|
359 | | -static int drm_cpu_valid(void) |
---|
360 | | -{ |
---|
361 | | -#if defined(__sparc__) && !defined(__sparc_v9__) |
---|
362 | | - return 0; /* No cmpxchg before v9 sparc. */ |
---|
363 | | -#endif |
---|
364 | | - return 1; |
---|
365 | | -} |
---|
366 | | - |
---|
367 | | -/* |
---|
368 | | - * Called whenever a process opens /dev/drm. |
---|
369 | | - * |
---|
370 | | - * \param filp file pointer. |
---|
371 | | - * \param minor acquired minor-object. |
---|
372 | | - * \return zero on success or a negative number on failure. |
---|
373 | | - * |
---|
374 | | - * Creates and initializes a drm_file structure for the file private data in \p |
---|
375 | | - * filp and add it into the double linked list in \p dev. |
---|
376 | | - */ |
---|
377 | | -static int drm_open_helper(struct file *filp, struct drm_minor *minor) |
---|
378 | | -{ |
---|
379 | | - struct drm_device *dev = minor->dev; |
---|
380 | | - struct drm_file *priv; |
---|
381 | | - int ret; |
---|
382 | | - |
---|
383 | | - if (filp->f_flags & O_EXCL) |
---|
384 | | - return -EBUSY; /* No exclusive opens */ |
---|
385 | | - if (!drm_cpu_valid()) |
---|
386 | | - return -EINVAL; |
---|
387 | | - if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF) |
---|
388 | | - return -EINVAL; |
---|
389 | | - |
---|
390 | | - DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index); |
---|
391 | | - |
---|
392 | | - priv = drm_file_alloc(minor); |
---|
393 | | - if (IS_ERR(priv)) |
---|
394 | | - return PTR_ERR(priv); |
---|
395 | | - |
---|
396 | | - if (drm_is_primary_client(priv)) { |
---|
397 | | - ret = drm_master_open(priv); |
---|
398 | | - if (ret) { |
---|
399 | | - drm_file_free(priv); |
---|
400 | | - return ret; |
---|
401 | | - } |
---|
402 | | - } |
---|
403 | | - |
---|
404 | | - filp->private_data = priv; |
---|
405 | | - filp->f_mode |= FMODE_UNSIGNED_OFFSET; |
---|
406 | | - priv->filp = filp; |
---|
407 | | - |
---|
408 | | - mutex_lock(&dev->filelist_mutex); |
---|
409 | | - list_add(&priv->lhead, &dev->filelist); |
---|
410 | | - mutex_unlock(&dev->filelist_mutex); |
---|
411 | | - |
---|
412 | | -#ifdef __alpha__ |
---|
413 | | - /* |
---|
414 | | - * Default the hose |
---|
415 | | - */ |
---|
416 | | - if (!dev->hose) { |
---|
417 | | - struct pci_dev *pci_dev; |
---|
418 | | - pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); |
---|
419 | | - if (pci_dev) { |
---|
420 | | - dev->hose = pci_dev->sysdata; |
---|
421 | | - pci_dev_put(pci_dev); |
---|
422 | | - } |
---|
423 | | - if (!dev->hose) { |
---|
424 | | - struct pci_bus *b = list_entry(pci_root_buses.next, |
---|
425 | | - struct pci_bus, node); |
---|
426 | | - if (b) |
---|
427 | | - dev->hose = b->sysdata; |
---|
428 | | - } |
---|
429 | | - } |
---|
430 | | -#endif |
---|
431 | | - |
---|
432 | | - return 0; |
---|
433 | | -} |
---|
434 | | - |
---|
435 | | -static void drm_legacy_dev_reinit(struct drm_device *dev) |
---|
436 | | -{ |
---|
437 | | - if (dev->irq_enabled) |
---|
438 | | - drm_irq_uninstall(dev); |
---|
439 | | - |
---|
440 | | - mutex_lock(&dev->struct_mutex); |
---|
441 | | - |
---|
442 | | - drm_legacy_agp_clear(dev); |
---|
443 | | - |
---|
444 | | - drm_legacy_sg_cleanup(dev); |
---|
445 | | - drm_legacy_vma_flush(dev); |
---|
446 | | - drm_legacy_dma_takedown(dev); |
---|
447 | | - |
---|
448 | | - mutex_unlock(&dev->struct_mutex); |
---|
449 | | - |
---|
450 | | - dev->sigdata.lock = NULL; |
---|
451 | | - |
---|
452 | | - dev->context_flag = 0; |
---|
453 | | - dev->last_context = 0; |
---|
454 | | - dev->if_version = 0; |
---|
455 | | - |
---|
456 | | - DRM_DEBUG("lastclose completed\n"); |
---|
457 | | -} |
---|
458 | 454 | |
---|
459 | 455 | void drm_lastclose(struct drm_device * dev) |
---|
460 | 456 | { |
---|
.. | .. |
---|
490 | 486 | struct drm_minor *minor = file_priv->minor; |
---|
491 | 487 | struct drm_device *dev = minor->dev; |
---|
492 | 488 | |
---|
493 | | - mutex_lock(&drm_global_mutex); |
---|
| 489 | + if (drm_dev_needs_global_mutex(dev)) |
---|
| 490 | + mutex_lock(&drm_global_mutex); |
---|
494 | 491 | |
---|
495 | | - DRM_DEBUG("open_count = %d\n", dev->open_count); |
---|
| 492 | + DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count)); |
---|
496 | 493 | |
---|
497 | 494 | drm_close_helper(filp); |
---|
498 | 495 | |
---|
499 | | - if (!--dev->open_count) |
---|
| 496 | + if (atomic_dec_and_test(&dev->open_count)) |
---|
500 | 497 | drm_lastclose(dev); |
---|
501 | 498 | |
---|
502 | | - mutex_unlock(&drm_global_mutex); |
---|
| 499 | + if (drm_dev_needs_global_mutex(dev)) |
---|
| 500 | + mutex_unlock(&drm_global_mutex); |
---|
503 | 501 | |
---|
504 | 502 | drm_minor_release(minor); |
---|
505 | 503 | |
---|
506 | 504 | return 0; |
---|
507 | 505 | } |
---|
508 | 506 | EXPORT_SYMBOL(drm_release); |
---|
| 507 | + |
---|
| 508 | +/** |
---|
| 509 | + * drm_release_noglobal - release method for DRM file |
---|
| 510 | + * @inode: device inode |
---|
| 511 | + * @filp: file pointer. |
---|
| 512 | + * |
---|
| 513 | + * This function may be used by drivers as their &file_operations.release |
---|
| 514 | + * method. It frees any resources associated with the open file prior to taking |
---|
| 515 | + * the drm_global_mutex, which then calls the &drm_driver.postclose driver |
---|
| 516 | + * callback. If this is the last open file for the DRM device also proceeds to |
---|
| 517 | + * call the &drm_driver.lastclose driver callback. |
---|
| 518 | + * |
---|
| 519 | + * RETURNS: |
---|
| 520 | + * |
---|
| 521 | + * Always succeeds and returns 0. |
---|
| 522 | + */ |
---|
| 523 | +int drm_release_noglobal(struct inode *inode, struct file *filp) |
---|
| 524 | +{ |
---|
| 525 | + struct drm_file *file_priv = filp->private_data; |
---|
| 526 | + struct drm_minor *minor = file_priv->minor; |
---|
| 527 | + struct drm_device *dev = minor->dev; |
---|
| 528 | + |
---|
| 529 | + drm_close_helper(filp); |
---|
| 530 | + |
---|
| 531 | + if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) { |
---|
| 532 | + drm_lastclose(dev); |
---|
| 533 | + mutex_unlock(&drm_global_mutex); |
---|
| 534 | + } |
---|
| 535 | + |
---|
| 536 | + drm_minor_release(minor); |
---|
| 537 | + |
---|
| 538 | + return 0; |
---|
| 539 | +} |
---|
| 540 | +EXPORT_SYMBOL(drm_release_noglobal); |
---|
509 | 541 | |
---|
510 | 542 | /** |
---|
511 | 543 | * drm_read - read method for DRM file |
---|
.. | .. |
---|
539 | 571 | struct drm_file *file_priv = filp->private_data; |
---|
540 | 572 | struct drm_device *dev = file_priv->minor->dev; |
---|
541 | 573 | ssize_t ret; |
---|
542 | | - |
---|
543 | | - if (!access_ok(VERIFY_WRITE, buffer, count)) |
---|
544 | | - return -EFAULT; |
---|
545 | 574 | |
---|
546 | 575 | ret = mutex_lock_interruptible(&file_priv->event_read_lock); |
---|
547 | 576 | if (ret) |
---|
.. | .. |
---|
584 | 613 | file_priv->event_space -= length; |
---|
585 | 614 | list_add(&e->link, &file_priv->event_list); |
---|
586 | 615 | spin_unlock_irq(&dev->event_lock); |
---|
587 | | - wake_up_interruptible(&file_priv->event_wait); |
---|
| 616 | + wake_up_interruptible_poll(&file_priv->event_wait, |
---|
| 617 | + EPOLLIN | EPOLLRDNORM); |
---|
588 | 618 | break; |
---|
589 | 619 | } |
---|
590 | 620 | |
---|
.. | .. |
---|
717 | 747 | EXPORT_SYMBOL(drm_event_reserve_init); |
---|
718 | 748 | |
---|
719 | 749 | /** |
---|
720 | | - * drm_event_cancel_free - free a DRM event and release it's space |
---|
| 750 | + * drm_event_cancel_free - free a DRM event and release its space |
---|
721 | 751 | * @dev: DRM device |
---|
722 | 752 | * @p: tracking structure for the pending event |
---|
723 | 753 | * |
---|
.. | .. |
---|
729 | 759 | struct drm_pending_event *p) |
---|
730 | 760 | { |
---|
731 | 761 | unsigned long flags; |
---|
| 762 | + |
---|
732 | 763 | spin_lock_irqsave(&dev->event_lock, flags); |
---|
733 | 764 | if (p->file_priv) { |
---|
734 | 765 | p->file_priv->event_space += p->event->length; |
---|
.. | .. |
---|
742 | 773 | kfree(p); |
---|
743 | 774 | } |
---|
744 | 775 | EXPORT_SYMBOL(drm_event_cancel_free); |
---|
| 776 | + |
---|
| 777 | +/** |
---|
| 778 | + * drm_send_event_helper - send DRM event to file descriptor |
---|
| 779 | + * @dev: DRM device |
---|
| 780 | + * @e: DRM event to deliver |
---|
| 781 | + * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC |
---|
| 782 | + * time domain |
---|
| 783 | + * |
---|
| 784 | + * This helper function sends the event @e, initialized with |
---|
| 785 | + * drm_event_reserve_init(), to its associated userspace DRM file. |
---|
| 786 | + * The timestamp variant of dma_fence_signal is used when the caller |
---|
| 787 | + * sends a valid timestamp. |
---|
| 788 | + */ |
---|
| 789 | +void drm_send_event_helper(struct drm_device *dev, |
---|
| 790 | + struct drm_pending_event *e, ktime_t timestamp) |
---|
| 791 | +{ |
---|
| 792 | + assert_spin_locked(&dev->event_lock); |
---|
| 793 | + |
---|
| 794 | + if (e->completion) { |
---|
| 795 | + complete_all(e->completion); |
---|
| 796 | + e->completion_release(e->completion); |
---|
| 797 | + e->completion = NULL; |
---|
| 798 | + } |
---|
| 799 | + |
---|
| 800 | + if (e->fence) { |
---|
| 801 | + if (timestamp) |
---|
| 802 | + dma_fence_signal_timestamp(e->fence, timestamp); |
---|
| 803 | + else |
---|
| 804 | + dma_fence_signal(e->fence); |
---|
| 805 | + dma_fence_put(e->fence); |
---|
| 806 | + } |
---|
| 807 | + |
---|
| 808 | + if (!e->file_priv) { |
---|
| 809 | + kfree(e); |
---|
| 810 | + return; |
---|
| 811 | + } |
---|
| 812 | + |
---|
| 813 | + list_del(&e->pending_link); |
---|
| 814 | + list_add_tail(&e->link, |
---|
| 815 | + &e->file_priv->event_list); |
---|
| 816 | + wake_up_interruptible_poll(&e->file_priv->event_wait, |
---|
| 817 | + EPOLLIN | EPOLLRDNORM); |
---|
| 818 | +} |
---|
| 819 | + |
---|
| 820 | +/** |
---|
| 821 | + * drm_send_event_timestamp_locked - send DRM event to file descriptor |
---|
| 822 | + * @dev: DRM device |
---|
| 823 | + * @e: DRM event to deliver |
---|
| 824 | + * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC |
---|
| 825 | + * time domain |
---|
| 826 | + * |
---|
| 827 | + * This function sends the event @e, initialized with drm_event_reserve_init(), |
---|
| 828 | + * to its associated userspace DRM file. Callers must already hold |
---|
| 829 | + * &drm_device.event_lock. |
---|
| 830 | + * |
---|
| 831 | + * Note that the core will take care of unlinking and disarming events when the |
---|
| 832 | + * corresponding DRM file is closed. Drivers need not worry about whether the |
---|
| 833 | + * DRM file for this event still exists and can call this function upon |
---|
| 834 | + * completion of the asynchronous work unconditionally. |
---|
| 835 | + */ |
---|
| 836 | +void drm_send_event_timestamp_locked(struct drm_device *dev, |
---|
| 837 | + struct drm_pending_event *e, ktime_t timestamp) |
---|
| 838 | +{ |
---|
| 839 | + drm_send_event_helper(dev, e, timestamp); |
---|
| 840 | +} |
---|
| 841 | +EXPORT_SYMBOL(drm_send_event_timestamp_locked); |
---|
745 | 842 | |
---|
746 | 843 | /** |
---|
747 | 844 | * drm_send_event_locked - send DRM event to file descriptor |
---|
.. | .. |
---|
759 | 856 | */ |
---|
760 | 857 | void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) |
---|
761 | 858 | { |
---|
762 | | - assert_spin_locked(&dev->event_lock); |
---|
763 | | - |
---|
764 | | - if (e->completion) { |
---|
765 | | - complete_all(e->completion); |
---|
766 | | - e->completion_release(e->completion); |
---|
767 | | - e->completion = NULL; |
---|
768 | | - } |
---|
769 | | - |
---|
770 | | - if (e->fence) { |
---|
771 | | - dma_fence_signal(e->fence); |
---|
772 | | - dma_fence_put(e->fence); |
---|
773 | | - } |
---|
774 | | - |
---|
775 | | - if (!e->file_priv) { |
---|
776 | | - kfree(e); |
---|
777 | | - return; |
---|
778 | | - } |
---|
779 | | - |
---|
780 | | - list_del(&e->pending_link); |
---|
781 | | - list_add_tail(&e->link, |
---|
782 | | - &e->file_priv->event_list); |
---|
783 | | - wake_up_interruptible(&e->file_priv->event_wait); |
---|
| 859 | + drm_send_event_helper(dev, e, 0); |
---|
784 | 860 | } |
---|
785 | 861 | EXPORT_SYMBOL(drm_send_event_locked); |
---|
786 | 862 | |
---|
.. | .. |
---|
804 | 880 | unsigned long irqflags; |
---|
805 | 881 | |
---|
806 | 882 | spin_lock_irqsave(&dev->event_lock, irqflags); |
---|
807 | | - drm_send_event_locked(dev, e); |
---|
| 883 | + drm_send_event_helper(dev, e, 0); |
---|
808 | 884 | spin_unlock_irqrestore(&dev->event_lock, irqflags); |
---|
809 | 885 | } |
---|
810 | 886 | EXPORT_SYMBOL(drm_send_event); |
---|
| 887 | + |
---|
| 888 | +/** |
---|
| 889 | + * mock_drm_getfile - Create a new struct file for the drm device |
---|
| 890 | + * @minor: drm minor to wrap (e.g. #drm_device.primary) |
---|
| 891 | + * @flags: file creation mode (O_RDWR etc) |
---|
| 892 | + * |
---|
| 893 | + * This creates a new struct file that wraps a DRM file context around a |
---|
| 894 | + * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without |
---|
| 895 | + * invoking userspace. The struct file may be operated on using its f_op |
---|
| 896 | + * (the drm_device.driver.fops) to mimic userspace operations, or be supplied |
---|
| 897 | + * to userspace facing functions as an internal/anonymous client. |
---|
| 898 | + * |
---|
| 899 | + * RETURNS: |
---|
| 900 | + * Pointer to newly created struct file, ERR_PTR on failure. |
---|
| 901 | + */ |
---|
| 902 | +struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags) |
---|
| 903 | +{ |
---|
| 904 | + struct drm_device *dev = minor->dev; |
---|
| 905 | + struct drm_file *priv; |
---|
| 906 | + struct file *file; |
---|
| 907 | + |
---|
| 908 | + priv = drm_file_alloc(minor); |
---|
| 909 | + if (IS_ERR(priv)) |
---|
| 910 | + return ERR_CAST(priv); |
---|
| 911 | + |
---|
| 912 | + file = anon_inode_getfile("drm", dev->driver->fops, priv, flags); |
---|
| 913 | + if (IS_ERR(file)) { |
---|
| 914 | + drm_file_free(priv); |
---|
| 915 | + return file; |
---|
| 916 | + } |
---|
| 917 | + |
---|
| 918 | + /* Everyone shares a single global address space */ |
---|
| 919 | + file->f_mapping = dev->anon_inode->i_mapping; |
---|
| 920 | + |
---|
| 921 | + drm_dev_get(dev); |
---|
| 922 | + priv->filp = file; |
---|
| 923 | + |
---|
| 924 | + return file; |
---|
| 925 | +} |
---|
| 926 | +EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile); |
---|
| 927 | + |
---|
| 928 | +#ifdef CONFIG_MMU |
---|
| 929 | +#ifdef CONFIG_TRANSPARENT_HUGEPAGE |
---|
| 930 | +/* |
---|
| 931 | + * drm_addr_inflate() attempts to construct an aligned area by inflating |
---|
| 932 | + * the area size and skipping the unaligned start of the area. |
---|
| 933 | + * adapted from shmem_get_unmapped_area() |
---|
| 934 | + */ |
---|
| 935 | +static unsigned long drm_addr_inflate(unsigned long addr, |
---|
| 936 | + unsigned long len, |
---|
| 937 | + unsigned long pgoff, |
---|
| 938 | + unsigned long flags, |
---|
| 939 | + unsigned long huge_size) |
---|
| 940 | +{ |
---|
| 941 | + unsigned long offset, inflated_len; |
---|
| 942 | + unsigned long inflated_addr; |
---|
| 943 | + unsigned long inflated_offset; |
---|
| 944 | + |
---|
| 945 | + offset = (pgoff << PAGE_SHIFT) & (huge_size - 1); |
---|
| 946 | + if (offset && offset + len < 2 * huge_size) |
---|
| 947 | + return addr; |
---|
| 948 | + if ((addr & (huge_size - 1)) == offset) |
---|
| 949 | + return addr; |
---|
| 950 | + |
---|
| 951 | + inflated_len = len + huge_size - PAGE_SIZE; |
---|
| 952 | + if (inflated_len > TASK_SIZE) |
---|
| 953 | + return addr; |
---|
| 954 | + if (inflated_len < len) |
---|
| 955 | + return addr; |
---|
| 956 | + |
---|
| 957 | + inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len, |
---|
| 958 | + 0, flags); |
---|
| 959 | + if (IS_ERR_VALUE(inflated_addr)) |
---|
| 960 | + return addr; |
---|
| 961 | + if (inflated_addr & ~PAGE_MASK) |
---|
| 962 | + return addr; |
---|
| 963 | + |
---|
| 964 | + inflated_offset = inflated_addr & (huge_size - 1); |
---|
| 965 | + inflated_addr += offset - inflated_offset; |
---|
| 966 | + if (inflated_offset > offset) |
---|
| 967 | + inflated_addr += huge_size; |
---|
| 968 | + |
---|
| 969 | + if (inflated_addr > TASK_SIZE - len) |
---|
| 970 | + return addr; |
---|
| 971 | + |
---|
| 972 | + return inflated_addr; |
---|
| 973 | +} |
---|
| 974 | + |
---|
| 975 | +/** |
---|
| 976 | + * drm_get_unmapped_area() - Get an unused user-space virtual memory area |
---|
| 977 | + * suitable for huge page table entries. |
---|
| 978 | + * @file: The struct file representing the address space being mmap()'d. |
---|
| 979 | + * @uaddr: Start address suggested by user-space. |
---|
| 980 | + * @len: Length of the area. |
---|
| 981 | + * @pgoff: The page offset into the address space. |
---|
| 982 | + * @flags: mmap flags |
---|
| 983 | + * @mgr: The address space manager used by the drm driver. This argument can |
---|
| 984 | + * probably be removed at some point when all drivers use the same |
---|
| 985 | + * address space manager. |
---|
| 986 | + * |
---|
| 987 | + * This function attempts to find an unused user-space virtual memory area |
---|
| 988 | + * that can accommodate the size we want to map, and that is properly |
---|
| 989 | + * aligned to facilitate huge page table entries matching actual |
---|
| 990 | + * huge pages or huge page aligned memory in buffer objects. Buffer objects |
---|
| 991 | + * are assumed to start at huge page boundary pfns (io memory) or be |
---|
| 992 | + * populated by huge pages aligned to the start of the buffer object |
---|
| 993 | + * (system- or coherent memory). Adapted from shmem_get_unmapped_area. |
---|
| 994 | + * |
---|
| 995 | + * Return: aligned user-space address. |
---|
| 996 | + */ |
---|
| 997 | +unsigned long drm_get_unmapped_area(struct file *file, |
---|
| 998 | + unsigned long uaddr, unsigned long len, |
---|
| 999 | + unsigned long pgoff, unsigned long flags, |
---|
| 1000 | + struct drm_vma_offset_manager *mgr) |
---|
| 1001 | +{ |
---|
| 1002 | + unsigned long addr; |
---|
| 1003 | + unsigned long inflated_addr; |
---|
| 1004 | + struct drm_vma_offset_node *node; |
---|
| 1005 | + |
---|
| 1006 | + if (len > TASK_SIZE) |
---|
| 1007 | + return -ENOMEM; |
---|
| 1008 | + |
---|
| 1009 | + /* |
---|
| 1010 | + * @pgoff is the file page-offset the huge page boundaries of |
---|
| 1011 | + * which typically aligns to physical address huge page boundaries. |
---|
| 1012 | + * That's not true for DRM, however, where physical address huge |
---|
| 1013 | + * page boundaries instead are aligned with the offset from |
---|
| 1014 | + * buffer object start. So adjust @pgoff to be the offset from |
---|
| 1015 | + * buffer object start. |
---|
| 1016 | + */ |
---|
| 1017 | + drm_vma_offset_lock_lookup(mgr); |
---|
| 1018 | + node = drm_vma_offset_lookup_locked(mgr, pgoff, 1); |
---|
| 1019 | + if (node) |
---|
| 1020 | + pgoff -= node->vm_node.start; |
---|
| 1021 | + drm_vma_offset_unlock_lookup(mgr); |
---|
| 1022 | + |
---|
| 1023 | + addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); |
---|
| 1024 | + if (IS_ERR_VALUE(addr)) |
---|
| 1025 | + return addr; |
---|
| 1026 | + if (addr & ~PAGE_MASK) |
---|
| 1027 | + return addr; |
---|
| 1028 | + if (addr > TASK_SIZE - len) |
---|
| 1029 | + return addr; |
---|
| 1030 | + |
---|
| 1031 | + if (len < HPAGE_PMD_SIZE) |
---|
| 1032 | + return addr; |
---|
| 1033 | + if (flags & MAP_FIXED) |
---|
| 1034 | + return addr; |
---|
| 1035 | + /* |
---|
| 1036 | + * Our priority is to support MAP_SHARED mapped hugely; |
---|
| 1037 | + * and support MAP_PRIVATE mapped hugely too, until it is COWed. |
---|
| 1038 | + * But if caller specified an address hint, respect that as before. |
---|
| 1039 | + */ |
---|
| 1040 | + if (uaddr) |
---|
| 1041 | + return addr; |
---|
| 1042 | + |
---|
| 1043 | + inflated_addr = drm_addr_inflate(addr, len, pgoff, flags, |
---|
| 1044 | + HPAGE_PMD_SIZE); |
---|
| 1045 | + |
---|
| 1046 | + if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && |
---|
| 1047 | + len >= HPAGE_PUD_SIZE) |
---|
| 1048 | + inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff, |
---|
| 1049 | + flags, HPAGE_PUD_SIZE); |
---|
| 1050 | + return inflated_addr; |
---|
| 1051 | +} |
---|
| 1052 | +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ |
---|
| 1053 | +unsigned long drm_get_unmapped_area(struct file *file, |
---|
| 1054 | + unsigned long uaddr, unsigned long len, |
---|
| 1055 | + unsigned long pgoff, unsigned long flags, |
---|
| 1056 | + struct drm_vma_offset_manager *mgr) |
---|
| 1057 | +{ |
---|
| 1058 | + return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); |
---|
| 1059 | +} |
---|
| 1060 | +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
---|
| 1061 | +EXPORT_SYMBOL_GPL(drm_get_unmapped_area); |
---|
| 1062 | +#endif /* CONFIG_MMU */ |
---|