.. | .. |
---|
86 | 86 | static DEFINE_HASHTABLE(kcov_remote_map, 4); |
---|
87 | 87 | static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas); |
---|
88 | 88 | |
---|
/*
 * Per-CPU state for collecting remote coverage from softirqs.
 */
struct kcov_percpu_data {
	/*
	 * Preallocated (in kcov_init()) buffer of CONFIG_KCOV_IRQ_AREA_SIZE
	 * words, used by kcov_remote_start() when called from a softirq.
	 */
	void *irq_area;

	/*
	 * Task coverage state stashed by kcov_remote_softirq_start() when a
	 * remote softirq section interrupts an active collection; restored
	 * by kcov_remote_softirq_stop().
	 */
	unsigned int saved_mode;
	unsigned int saved_size;
	void *saved_area;
	struct kcov *saved_kcov;
	int saved_sequence;
};

static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data);
---|
89 | 101 | /* Must be called with kcov_remote_lock locked. */ |
---|
90 | 102 | static struct kcov_remote *kcov_remote_find(u64 handle) |
---|
91 | 103 | { |
---|
.. | .. |
---|
98 | 110 | return NULL; |
---|
99 | 111 | } |
---|
100 | 112 | |
---|
| 113 | +/* Must be called with kcov_remote_lock locked. */ |
---|
101 | 114 | static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle) |
---|
102 | 115 | { |
---|
103 | 116 | struct kcov_remote *remote; |
---|
.. | .. |
---|
119 | 132 | struct kcov_remote_area *area; |
---|
120 | 133 | struct list_head *pos; |
---|
121 | 134 | |
---|
122 | | - kcov_debug("size = %u\n", size); |
---|
123 | 135 | list_for_each(pos, &kcov_remote_areas) { |
---|
124 | 136 | area = list_entry(pos, struct kcov_remote_area, list); |
---|
125 | 137 | if (area->size == size) { |
---|
126 | 138 | list_del(&area->list); |
---|
127 | | - kcov_debug("rv = %px\n", area); |
---|
128 | 139 | return area; |
---|
129 | 140 | } |
---|
130 | 141 | } |
---|
131 | | - kcov_debug("rv = NULL\n"); |
---|
132 | 142 | return NULL; |
---|
133 | 143 | } |
---|
134 | 144 | |
---|
.. | .. |
---|
136 | 146 | static void kcov_remote_area_put(struct kcov_remote_area *area, |
---|
137 | 147 | unsigned int size) |
---|
138 | 148 | { |
---|
139 | | - kcov_debug("area = %px, size = %u\n", area, size); |
---|
140 | 149 | INIT_LIST_HEAD(&area->list); |
---|
141 | 150 | area->size = size; |
---|
142 | 151 | list_add(&area->list, &kcov_remote_areas); |
---|
.. | .. |
---|
148 | 157 | |
---|
149 | 158 | /* |
---|
150 | 159 | * We are interested in code coverage as a function of a syscall inputs, |
---|
151 | | - * so we ignore code executed in interrupts. |
---|
| 160 | + * so we ignore code executed in interrupts, unless we are in a remote |
---|
| 161 | + * coverage collection section in a softirq. |
---|
152 | 162 | */ |
---|
153 | | - if (!in_task()) |
---|
| 163 | + if (!in_task() && !(in_serving_softirq() && t->kcov_softirq)) |
---|
154 | 164 | return false; |
---|
155 | 165 | mode = READ_ONCE(t->kcov_mode); |
---|
156 | 166 | /* |
---|
.. | .. |
---|
312 | 322 | EXPORT_SYMBOL(__sanitizer_cov_trace_switch); |
---|
313 | 323 | #endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */ |
---|
314 | 324 | |
---|
/*
 * Arm coverage collection for task @t into @kcov's buffer @area of @size
 * words. All bookkeeping fields are written before the mode is published,
 * so the tracing callbacks never observe half-initialized state.
 */
static void kcov_start(struct task_struct *t, struct kcov *kcov,
			unsigned int size, void *area, enum kcov_mode mode,
			int sequence)
{
	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
	t->kcov = kcov;
	/* Cache in task struct for performance. */
	t->kcov_size = size;
	t->kcov_area = area;
	t->kcov_sequence = sequence;
	/* See comment in check_kcov_mode(). */
	barrier();
	/* Setting the mode last is what makes collection take effect. */
	WRITE_ONCE(t->kcov_mode, mode);
}
---|
327 | 339 | |
---|
/*
 * Disarm coverage collection for task @t; mirror image of kcov_start().
 * The mode is cleared first (with a compiler barrier) so the tracing
 * callbacks stop touching the area before the pointers are reset.
 */
static void kcov_stop(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov = NULL;
	t->kcov_size = 0;
	t->kcov_area = NULL;
}
---|
.. | .. |
---|
/* Fully detach task @t from kcov, including remote-handle bookkeeping. */
static void kcov_task_reset(struct task_struct *t)
{
	kcov_stop(t);
	t->kcov_sequence = 0;
	t->kcov_handle = 0;
}
---|
.. | .. |
---|
361 | 373 | int bkt; |
---|
362 | 374 | struct kcov_remote *remote; |
---|
363 | 375 | struct hlist_node *tmp; |
---|
| 376 | + unsigned long flags; |
---|
364 | 377 | |
---|
365 | | - spin_lock(&kcov_remote_lock); |
---|
| 378 | + spin_lock_irqsave(&kcov_remote_lock, flags); |
---|
366 | 379 | hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) { |
---|
367 | 380 | if (remote->kcov != kcov) |
---|
368 | 381 | continue; |
---|
369 | | - kcov_debug("removing handle %llx\n", remote->handle); |
---|
370 | 382 | hash_del(&remote->hnode); |
---|
371 | 383 | kfree(remote); |
---|
372 | 384 | } |
---|
373 | 385 | /* Do reset before unlock to prevent races with kcov_remote_start(). */ |
---|
374 | 386 | kcov_reset(kcov); |
---|
375 | | - spin_unlock(&kcov_remote_lock); |
---|
| 387 | + spin_unlock_irqrestore(&kcov_remote_lock, flags); |
---|
376 | 388 | } |
---|
377 | 389 | |
---|
378 | 390 | static void kcov_disable(struct task_struct *t, struct kcov *kcov) |
---|
.. | .. |
---|
401 | 413 | void kcov_task_exit(struct task_struct *t) |
---|
402 | 414 | { |
---|
403 | 415 | struct kcov *kcov; |
---|
| 416 | + unsigned long flags; |
---|
404 | 417 | |
---|
405 | 418 | kcov = t->kcov; |
---|
406 | 419 | if (kcov == NULL) |
---|
407 | 420 | return; |
---|
408 | 421 | |
---|
409 | | - spin_lock(&kcov->lock); |
---|
| 422 | + spin_lock_irqsave(&kcov->lock, flags); |
---|
410 | 423 | kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t); |
---|
411 | 424 | /* |
---|
412 | 425 | * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t, |
---|
.. | .. |
---|
414 | 427 | * WARN_ON(!kcov->remote && kcov->t != t); |
---|
415 | 428 | * |
---|
416 | 429 | * For KCOV_REMOTE_ENABLE devices, the exiting task is either: |
---|
417 | | - * 2. A remote task between kcov_remote_start() and kcov_remote_stop(). |
---|
| 430 | + * |
---|
| 431 | + * 1. A remote task between kcov_remote_start() and kcov_remote_stop(). |
---|
418 | 432 | * In this case we should print a warning right away, since a task |
---|
419 | 433 | * shouldn't be exiting when it's in a kcov coverage collection |
---|
420 | 434 | * section. Here t points to the task that is collecting remote |
---|
.. | .. |
---|
424 | 438 | * WARN_ON(kcov->remote && kcov->t != t); |
---|
425 | 439 | * |
---|
426 | 440 | * 2. The task that created kcov exiting without calling KCOV_DISABLE, |
---|
427 | | - * and then again we can make sure that t->kcov->t == t: |
---|
| 441 | + * and then again we make sure that t->kcov->t == t: |
---|
428 | 442 | * WARN_ON(kcov->remote && kcov->t != t); |
---|
429 | 443 | * |
---|
430 | 444 | * By combining all three checks into one we get: |
---|
431 | 445 | */ |
---|
432 | 446 | if (WARN_ON(kcov->t != t)) { |
---|
433 | | - spin_unlock(&kcov->lock); |
---|
| 447 | + spin_unlock_irqrestore(&kcov->lock, flags); |
---|
434 | 448 | return; |
---|
435 | 449 | } |
---|
436 | 450 | /* Just to not leave dangling references behind. */ |
---|
437 | 451 | kcov_disable(t, kcov); |
---|
438 | | - spin_unlock(&kcov->lock); |
---|
| 452 | + spin_unlock_irqrestore(&kcov->lock, flags); |
---|
439 | 453 | kcov_put(kcov); |
---|
440 | 454 | } |
---|
441 | 455 | |
---|
.. | .. |
---|
446 | 460 | struct kcov *kcov = vma->vm_file->private_data; |
---|
447 | 461 | unsigned long size, off; |
---|
448 | 462 | struct page *page; |
---|
| 463 | + unsigned long flags; |
---|
449 | 464 | |
---|
450 | 465 | area = vmalloc_user(vma->vm_end - vma->vm_start); |
---|
451 | 466 | if (!area) |
---|
452 | 467 | return -ENOMEM; |
---|
453 | 468 | |
---|
454 | | - spin_lock(&kcov->lock); |
---|
| 469 | + spin_lock_irqsave(&kcov->lock, flags); |
---|
455 | 470 | size = kcov->size * sizeof(unsigned long); |
---|
456 | 471 | if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 || |
---|
457 | 472 | vma->vm_end - vma->vm_start != size) { |
---|
.. | .. |
---|
461 | 476 | if (!kcov->area) { |
---|
462 | 477 | kcov->area = area; |
---|
463 | 478 | vma->vm_flags |= VM_DONTEXPAND; |
---|
464 | | - spin_unlock(&kcov->lock); |
---|
| 479 | + spin_unlock_irqrestore(&kcov->lock, flags); |
---|
465 | 480 | for (off = 0; off < size; off += PAGE_SIZE) { |
---|
466 | 481 | page = vmalloc_to_page(kcov->area + off); |
---|
467 | 482 | if (vm_insert_page(vma, vma->vm_start + off, page)) |
---|
.. | .. |
---|
470 | 485 | return 0; |
---|
471 | 486 | } |
---|
472 | 487 | exit: |
---|
473 | | - spin_unlock(&kcov->lock); |
---|
| 488 | + spin_unlock_irqrestore(&kcov->lock, flags); |
---|
474 | 489 | vfree(area); |
---|
475 | 490 | return res; |
---|
476 | 491 | } |
---|
.. | .. |
---|
550 | 565 | int mode, i; |
---|
551 | 566 | struct kcov_remote_arg *remote_arg; |
---|
552 | 567 | struct kcov_remote *remote; |
---|
| 568 | + unsigned long flags; |
---|
553 | 569 | |
---|
554 | 570 | switch (cmd) { |
---|
555 | 571 | case KCOV_INIT_TRACE: |
---|
556 | | - kcov_debug("KCOV_INIT_TRACE\n"); |
---|
557 | 572 | /* |
---|
558 | 573 | * Enable kcov in trace mode and setup buffer size. |
---|
559 | 574 | * Must happen before anything else. |
---|
.. | .. |
---|
572 | 587 | kcov->mode = KCOV_MODE_INIT; |
---|
573 | 588 | return 0; |
---|
574 | 589 | case KCOV_ENABLE: |
---|
575 | | - kcov_debug("KCOV_ENABLE\n"); |
---|
576 | 590 | /* |
---|
577 | 591 | * Enable coverage for the current task. |
---|
578 | 592 | * At this point user must have been enabled trace mode, |
---|
.. | .. |
---|
590 | 604 | return mode; |
---|
591 | 605 | kcov_fault_in_area(kcov); |
---|
592 | 606 | kcov->mode = mode; |
---|
593 | | - kcov_start(t, kcov->size, kcov->area, kcov->mode, |
---|
| 607 | + kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode, |
---|
594 | 608 | kcov->sequence); |
---|
595 | | - t->kcov = kcov; |
---|
596 | 609 | kcov->t = t; |
---|
597 | 610 | /* Put either in kcov_task_exit() or in KCOV_DISABLE. */ |
---|
598 | 611 | kcov_get(kcov); |
---|
599 | 612 | return 0; |
---|
600 | 613 | case KCOV_DISABLE: |
---|
601 | | - kcov_debug("KCOV_DISABLE\n"); |
---|
602 | 614 | /* Disable coverage for the current task. */ |
---|
603 | 615 | unused = arg; |
---|
604 | 616 | if (unused != 0 || current->kcov != kcov) |
---|
.. | .. |
---|
610 | 622 | kcov_put(kcov); |
---|
611 | 623 | return 0; |
---|
612 | 624 | case KCOV_REMOTE_ENABLE: |
---|
613 | | - kcov_debug("KCOV_REMOTE_ENABLE\n"); |
---|
614 | 625 | if (kcov->mode != KCOV_MODE_INIT || !kcov->area) |
---|
615 | 626 | return -EINVAL; |
---|
616 | 627 | t = current; |
---|
.. | .. |
---|
627 | 638 | kcov->t = t; |
---|
628 | 639 | kcov->remote = true; |
---|
629 | 640 | kcov->remote_size = remote_arg->area_size; |
---|
630 | | - spin_lock(&kcov_remote_lock); |
---|
| 641 | + spin_lock_irqsave(&kcov_remote_lock, flags); |
---|
631 | 642 | for (i = 0; i < remote_arg->num_handles; i++) { |
---|
632 | | - kcov_debug("handle %llx\n", remote_arg->handles[i]); |
---|
633 | 643 | if (!kcov_check_handle(remote_arg->handles[i], |
---|
634 | 644 | false, true, false)) { |
---|
635 | | - spin_unlock(&kcov_remote_lock); |
---|
| 645 | + spin_unlock_irqrestore(&kcov_remote_lock, |
---|
| 646 | + flags); |
---|
636 | 647 | kcov_disable(t, kcov); |
---|
637 | 648 | return -EINVAL; |
---|
638 | 649 | } |
---|
639 | 650 | remote = kcov_remote_add(kcov, remote_arg->handles[i]); |
---|
640 | 651 | if (IS_ERR(remote)) { |
---|
641 | | - spin_unlock(&kcov_remote_lock); |
---|
| 652 | + spin_unlock_irqrestore(&kcov_remote_lock, |
---|
| 653 | + flags); |
---|
642 | 654 | kcov_disable(t, kcov); |
---|
643 | 655 | return PTR_ERR(remote); |
---|
644 | 656 | } |
---|
645 | 657 | } |
---|
646 | 658 | if (remote_arg->common_handle) { |
---|
647 | | - kcov_debug("common handle %llx\n", |
---|
648 | | - remote_arg->common_handle); |
---|
649 | 659 | if (!kcov_check_handle(remote_arg->common_handle, |
---|
650 | 660 | true, false, false)) { |
---|
651 | | - spin_unlock(&kcov_remote_lock); |
---|
| 661 | + spin_unlock_irqrestore(&kcov_remote_lock, |
---|
| 662 | + flags); |
---|
652 | 663 | kcov_disable(t, kcov); |
---|
653 | 664 | return -EINVAL; |
---|
654 | 665 | } |
---|
655 | 666 | remote = kcov_remote_add(kcov, |
---|
656 | 667 | remote_arg->common_handle); |
---|
657 | 668 | if (IS_ERR(remote)) { |
---|
658 | | - spin_unlock(&kcov_remote_lock); |
---|
| 669 | + spin_unlock_irqrestore(&kcov_remote_lock, |
---|
| 670 | + flags); |
---|
659 | 671 | kcov_disable(t, kcov); |
---|
660 | 672 | return PTR_ERR(remote); |
---|
661 | 673 | } |
---|
662 | 674 | t->kcov_handle = remote_arg->common_handle; |
---|
663 | 675 | } |
---|
664 | | - spin_unlock(&kcov_remote_lock); |
---|
| 676 | + spin_unlock_irqrestore(&kcov_remote_lock, flags); |
---|
665 | 677 | /* Put either in kcov_task_exit() or in KCOV_DISABLE. */ |
---|
666 | 678 | kcov_get(kcov); |
---|
667 | 679 | return 0; |
---|
.. | .. |
---|
677 | 689 | struct kcov_remote_arg *remote_arg = NULL; |
---|
678 | 690 | unsigned int remote_num_handles; |
---|
679 | 691 | unsigned long remote_arg_size; |
---|
| 692 | + unsigned long flags; |
---|
680 | 693 | |
---|
681 | 694 | if (cmd == KCOV_REMOTE_ENABLE) { |
---|
682 | 695 | if (get_user(remote_num_handles, (unsigned __user *)(arg + |
---|
.. | .. |
---|
697 | 710 | } |
---|
698 | 711 | |
---|
699 | 712 | kcov = filep->private_data; |
---|
700 | | - spin_lock(&kcov->lock); |
---|
| 713 | + spin_lock_irqsave(&kcov->lock, flags); |
---|
701 | 714 | res = kcov_ioctl_locked(kcov, cmd, arg); |
---|
702 | | - spin_unlock(&kcov->lock); |
---|
| 715 | + spin_unlock_irqrestore(&kcov->lock, flags); |
---|
703 | 716 | |
---|
704 | 717 | kfree(remote_arg); |
---|
705 | 718 | |
---|
.. | .. |
---|
716 | 729 | |
---|
717 | 730 | /* |
---|
718 | 731 | * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section |
---|
719 | | - * of code in a kernel background thread to allow kcov to be used to collect |
---|
720 | | - * coverage from that part of code. |
---|
| 732 | + * of code in a kernel background thread or in a softirq to allow kcov to be |
---|
| 733 | + * used to collect coverage from that part of code. |
---|
721 | 734 | * |
---|
722 | 735 | * The handle argument of kcov_remote_start() identifies a code section that is |
---|
723 | 736 | * used for coverage collection. A userspace process passes this handle to |
---|
.. | .. |
---|
728 | 741 | * the type of the kernel thread whose code is being annotated. |
---|
729 | 742 | * |
---|
730 | 743 | * For global kernel threads that are spawned in a limited number of instances |
---|
731 | | - * (e.g. one USB hub_event() worker thread is spawned per USB HCD), each |
---|
732 | | - * instance must be assigned a unique 4-byte instance id. The instance id is |
---|
733 | | - * then combined with a 1-byte subsystem id to get a handle via |
---|
| 744 | + * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for |
---|
| 745 | + * softirqs, each instance must be assigned a unique 4-byte instance id. The |
---|
| 746 | + * instance id is then combined with a 1-byte subsystem id to get a handle via |
---|
734 | 747 | * kcov_remote_handle(subsystem_id, instance_id). |
---|
735 | 748 | * |
---|
736 | 749 | * For local kernel threads that are spawned from system calls handler when a |
---|
.. | .. |
---|
740 | 753 | * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an |
---|
741 | 754 | * arbitrary 4-byte non-zero number as the instance id). This common handle |
---|
742 | 755 | * then gets saved into the task_struct of the process that issued the |
---|
743 | | - * KCOV_REMOTE_ENABLE ioctl. When this proccess issues system calls that spawn |
---|
744 | | - * kernel threads, the common handle must be retrived via kcov_common_handle() |
---|
| 756 | + * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn |
---|
| 757 | + * kernel threads, the common handle must be retrieved via kcov_common_handle() |
---|
745 | 758 | * and passed to the spawned threads via custom annotations. Those kernel |
---|
746 | 759 | * threads must in turn be annotated with kcov_remote_start(common_handle) and |
---|
747 | 760 | * kcov_remote_stop(). All of the threads that are spawned by the same process |
---|
.. | .. |
---|
749 | 762 | * |
---|
750 | 763 | * See Documentation/dev-tools/kcov.rst for more details. |
---|
751 | 764 | * |
---|
752 | | - * Internally, this function looks up the kcov device associated with the |
---|
| 765 | + * Internally, kcov_remote_start() looks up the kcov device associated with the |
---|
753 | 766 | * provided handle, allocates an area for coverage collection, and saves the |
---|
754 | 767 | * pointers to kcov and area into the current task_struct to allow coverage to |
---|
755 | | - * be collected via __sanitizer_cov_trace_pc() |
---|
| 768 | + * be collected via __sanitizer_cov_trace_pc(). |
---|
756 | 769 | * In turns kcov_remote_stop() clears those pointers from task_struct to stop |
---|
757 | 770 | * collecting coverage and copies all collected coverage into the kcov area. |
---|
758 | 771 | */ |
---|
| 772 | + |
---|
| 773 | +static inline bool kcov_mode_enabled(unsigned int mode) |
---|
| 774 | +{ |
---|
| 775 | + return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED; |
---|
| 776 | +} |
---|
| 777 | + |
---|
/*
 * Called with irqs disabled when a remote softirq section begins on this
 * CPU: if task @t was already collecting coverage, stash its kcov state
 * into the per-CPU slot and stop collection, so the softirq section can
 * reuse the task's kcov fields. Undone by kcov_remote_softirq_stop().
 */
static void kcov_remote_softirq_start(struct task_struct *t)
{
	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
	unsigned int mode;

	mode = READ_ONCE(t->kcov_mode);
	/* Pairs with the barrier in kcov_start(): mode is read first. */
	barrier();
	if (kcov_mode_enabled(mode)) {
		data->saved_mode = mode;
		data->saved_size = t->kcov_size;
		data->saved_area = t->kcov_area;
		data->saved_sequence = t->kcov_sequence;
		data->saved_kcov = t->kcov;
		kcov_stop(t);
	}
}
---|
| 794 | + |
---|
| 795 | +static void kcov_remote_softirq_stop(struct task_struct *t) |
---|
| 796 | +{ |
---|
| 797 | + struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data); |
---|
| 798 | + |
---|
| 799 | + if (data->saved_kcov) { |
---|
| 800 | + kcov_start(t, data->saved_kcov, data->saved_size, |
---|
| 801 | + data->saved_area, data->saved_mode, |
---|
| 802 | + data->saved_sequence); |
---|
| 803 | + data->saved_mode = 0; |
---|
| 804 | + data->saved_size = 0; |
---|
| 805 | + data->saved_area = NULL; |
---|
| 806 | + data->saved_sequence = 0; |
---|
| 807 | + data->saved_kcov = NULL; |
---|
| 808 | + } |
---|
| 809 | +} |
---|
| 810 | + |
---|
/*
 * Begin a remote coverage collection section identified by @handle.
 * Callable from task context or from a softirq; see the comment block
 * above for the handle scheme and overall protocol. On any failure the
 * function returns silently (optionally with a WARN) and collection
 * simply does not start.
 */
void kcov_remote_start(u64 handle)
{
	struct task_struct *t = current;
	struct kcov_remote *remote;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
		return;
	/* Only task and softirq contexts are supported; hardirq is not. */
	if (!in_task() && !in_serving_softirq())
		return;

	/* Irqs stay disabled while task kcov state is examined/modified. */
	local_irq_save(flags);

	/*
	 * Check that kcov_remote_start() is not called twice in background
	 * threads nor called by user tasks (with enabled kcov).
	 */
	mode = READ_ONCE(t->kcov_mode);
	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
		local_irq_restore(flags);
		return;
	}
	/*
	 * Check that kcov_remote_start() is not called twice in softirqs.
	 * Note, that kcov_remote_start() can be called from a softirq that
	 * happened while collecting coverage from a background thread.
	 */
	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
		local_irq_restore(flags);
		return;
	}

	/* Irqs are already off, so the plain spin_lock() variant suffices. */
	spin_lock(&kcov_remote_lock);
	remote = kcov_remote_find(handle);
	if (!remote) {
		spin_unlock_irqrestore(&kcov_remote_lock, flags);
		return;
	}
	kcov_debug("handle = %llx, context: %s\n", handle,
			in_task() ? "task" : "softirq");
	kcov = remote->kcov;
	/* Put in kcov_remote_stop(). */
	kcov_get(kcov);
	/*
	 * Read kcov fields before unlock to prevent races with
	 * KCOV_DISABLE / kcov_remote_reset().
	 */
	mode = kcov->mode;
	sequence = kcov->sequence;
	if (in_task()) {
		size = kcov->remote_size;
		area = kcov_remote_area_get(size);
	} else {
		/* Softirqs use the preallocated per-CPU buffer instead. */
		size = CONFIG_KCOV_IRQ_AREA_SIZE;
		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
	}
	spin_unlock_irqrestore(&kcov_remote_lock, flags);

	/* Can only happen when in_task(). */
	if (!area) {
		/* vmalloc() may sleep, hence it runs with irqs enabled. */
		area = vmalloc(size * sizeof(unsigned long));
		if (!area) {
			kcov_put(kcov);
			return;
		}
	}

	local_irq_save(flags);

	/* Reset coverage size. */
	*(u64 *)area = 0;

	if (in_serving_softirq()) {
		/* Stash the interrupted task's coverage state, if any. */
		kcov_remote_softirq_start(t);
		t->kcov_softirq = 1;
	}
	kcov_start(t, kcov, size, area, mode, sequence);

	local_irq_restore(flags);

}
---|
818 | 897 | EXPORT_SYMBOL(kcov_remote_start); |
---|
.. | .. |
---|
/*
 * End a remote coverage collection section started by kcov_remote_start():
 * stop collecting, copy the gathered coverage into the kcov area (unless
 * KCOV_DISABLE invalidated the sequence in between), recycle the spare
 * area, and drop the reference taken in kcov_remote_start().
 */
void kcov_remote_stop(void)
{
	struct task_struct *t = current;
	struct kcov *kcov;
	unsigned int mode;
	void *area;
	unsigned int size;
	int sequence;
	unsigned long flags;

	if (!in_task() && !in_serving_softirq())
		return;

	local_irq_save(flags);

	mode = READ_ONCE(t->kcov_mode);
	/* Pairs with the barrier in kcov_start(): mode is read first. */
	barrier();
	if (!kcov_mode_enabled(mode)) {
		local_irq_restore(flags);
		return;
	}
	/*
	 * When in softirq, check if the corresponding kcov_remote_start()
	 * actually found the remote handle and started collecting coverage.
	 */
	if (in_serving_softirq() && !t->kcov_softirq) {
		local_irq_restore(flags);
		return;
	}
	/* Make sure that kcov_softirq is only set when in softirq. */
	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
		local_irq_restore(flags);
		return;
	}

	/* Snapshot the fields before kcov_stop() clears them. */
	kcov = t->kcov;
	area = t->kcov_area;
	size = t->kcov_size;
	sequence = t->kcov_sequence;

	kcov_stop(t);
	if (in_serving_softirq()) {
		t->kcov_softirq = 0;
		/* Restore the interrupted task's coverage state, if any. */
		kcov_remote_softirq_stop(t);
	}

	/* Irqs are already off, so the plain spin_lock() variant suffices. */
	spin_lock(&kcov->lock);
	/*
	 * KCOV_DISABLE could have been called between kcov_remote_start()
	 * and kcov_remote_stop(), hence the sequence check.
	 */
	if (sequence == kcov->sequence && kcov->remote)
		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
	spin_unlock(&kcov->lock);

	if (in_task()) {
		/* Softirq areas are per-CPU and permanent; don't recycle them. */
		spin_lock(&kcov_remote_lock);
		kcov_remote_area_put(area, size);
		spin_unlock(&kcov_remote_lock);
	}

	local_irq_restore(flags);

	/* Get in kcov_remote_start(). */
	kcov_put(kcov);
}
---|
909 | 1021 | EXPORT_SYMBOL(kcov_remote_stop); |
---|
.. | .. |
---|
917 | 1029 | |
---|
918 | 1030 | static int __init kcov_init(void) |
---|
919 | 1031 | { |
---|
| 1032 | + int cpu; |
---|
| 1033 | + |
---|
| 1034 | + for_each_possible_cpu(cpu) { |
---|
| 1035 | + void *area = vmalloc(CONFIG_KCOV_IRQ_AREA_SIZE * |
---|
| 1036 | + sizeof(unsigned long)); |
---|
| 1037 | + if (!area) |
---|
| 1038 | + return -ENOMEM; |
---|
| 1039 | + per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area; |
---|
| 1040 | + } |
---|
| 1041 | + |
---|
920 | 1042 | /* |
---|
921 | 1043 | * The kcov debugfs file won't ever get removed and thus, |
---|
922 | 1044 | * there is no need to protect it against removal races. The |
---|