2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
--- a/kernel/include/trace/events/sched.h
+++ b/kernel/include/trace/events/sched.h
@@ -91,7 +91,7 @@
 
 /*
  * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
- * It it not always called from the waking context.
+ * It is not always called from the waking context.
  */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
  TP_PROTO(struct task_struct *p),
@@ -198,6 +198,7 @@
  __field( int, prio )
  __field( int, orig_cpu )
  __field( int, dest_cpu )
+ __field( int, running )
  ),
 
  TP_fast_assign(
@@ -206,11 +207,13 @@
  __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
  __entry->orig_cpu = task_cpu(p);
  __entry->dest_cpu = dest_cpu;
+ __entry->running = (p->state == TASK_RUNNING);
  ),
 
- TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
+ TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d running=%d",
  __entry->comm, __entry->pid, __entry->prio,
- __entry->orig_cpu, __entry->dest_cpu)
+ __entry->orig_cpu, __entry->dest_cpu,
+ __entry->running)
 );
 
 DECLARE_EVENT_CLASS(sched_process_template,
@@ -241,7 +244,6 @@
 DEFINE_EVENT(sched_process_template, sched_process_free,
  TP_PROTO(struct task_struct *p),
  TP_ARGS(p));
-
 
 /*
  * Tracepoint for a task exiting:
@@ -336,11 +338,20 @@
  __entry->pid, __entry->old_pid)
 );
 
+
+#ifdef CONFIG_SCHEDSTAT
+#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
+#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
+#else
+#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
+#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
+#endif
+
 /*
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  * adding sched_stat support to SCHED_FIFO/RR would be welcome.
  */
-DECLARE_EVENT_CLASS(sched_stat_template,
+DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,
 
  TP_PROTO(struct task_struct *tsk, u64 delay),
 
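The DEFINE_EVENT_SCHEDSTAT / DECLARE_EVENT_CLASS_SCHEDSTAT aliases added above make the sched_stat_* events below fall back to the tracepoint framework's DEFINE_EVENT_NOP / DECLARE_EVENT_CLASS_NOP variants when CONFIG_SCHEDSTAT is not set, so trace_sched_stat_*() call sites still compile but expand to empty stubs. A minimal standalone sketch of the same conditional-alias idiom, using hypothetical names (MY_FEATURE, my_hook) rather than the kernel macros themselves:

/*
 * Standalone illustration of the compile-time no-op alias idiom.
 * MY_FEATURE and my_hook are made-up names for this sketch only.
 */
#include <stdio.h>

#define my_hook_real(cpu)	printf("stat hook fired on cpu %d\n", (cpu))
#define my_hook_nop(cpu)	do { (void)(cpu); } while (0)

#ifdef MY_FEATURE
#define my_hook	my_hook_real	/* feature enabled: real body */
#else
#define my_hook	my_hook_nop	/* feature disabled: no-op, call sites still compile */
#endif

int main(void)
{
	my_hook(3);	/* prints only when built with -DMY_FEATURE */
	return 0;
}

Building with -DMY_FEATURE enables the real hook; without it the call compiles away entirely, which is the property the SCHEDSTAT wrappers rely on for the events that follow.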
@@ -363,12 +374,11 @@
  (unsigned long long)__entry->delay)
 );
 
-
 /*
  * Tracepoint for accounting wait time (time the task is runnable
  * but not actually running due to scheduler contention).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_wait,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
  TP_PROTO(struct task_struct *tsk, u64 delay),
  TP_ARGS(tsk, delay));
 
@@ -376,7 +386,7 @@
  * Tracepoint for accounting sleep time (time the task is not runnable,
  * including iowait, see below).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
  TP_PROTO(struct task_struct *tsk, u64 delay),
  TP_ARGS(tsk, delay));
 
@@ -384,14 +394,14 @@
  * Tracepoint for accounting iowait time (time the task is not runnable
  * due to waiting on IO to complete).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
  TP_PROTO(struct task_struct *tsk, u64 delay),
  TP_ARGS(tsk, delay));
 
 /*
  * Tracepoint for accounting blocked time (time the task is in uninterruptible).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
  TP_PROTO(struct task_struct *tsk, u64 delay),
  TP_ARGS(tsk, delay));
 
@@ -412,7 +422,7 @@
 
  TP_fast_assign(
  __entry->pid = tsk->pid;
- __entry->caller = (void*)get_wchan(tsk);
+ __entry->caller = (void *)get_wchan(tsk);
  __entry->io_wait = tsk->in_iowait;
  ),
 
@@ -504,7 +514,11 @@
 );
 #endif /* CONFIG_DETECT_HUNG_TASK */
 
-DECLARE_EVENT_CLASS(sched_move_task_template,
+/*
+ * Tracks migration of tasks from one runqueue to another. Can be used to
+ * detect if automatic NUMA balancing is bouncing between nodes.
+ */
+TRACE_EVENT(sched_move_numa,
 
  TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
 
@@ -536,23 +550,7 @@
  __entry->dst_cpu, __entry->dst_nid)
 );
 
-/*
- * Tracks migration of tasks from one runqueue to another. Can be used to
- * detect if automatic NUMA balancing is bouncing between nodes
- */
-DEFINE_EVENT(sched_move_task_template, sched_move_numa,
- TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
-
- TP_ARGS(tsk, src_cpu, dst_cpu)
-);
-
-DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
- TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
-
- TP_ARGS(tsk, src_cpu, dst_cpu)
-);
-
-TRACE_EVENT(sched_swap_numa,
+DECLARE_EVENT_CLASS(sched_numa_pair_template,
 
  TP_PROTO(struct task_struct *src_tsk, int src_cpu,
  struct task_struct *dst_tsk, int dst_cpu),
@@ -578,11 +576,11 @@
  __entry->src_ngid = task_numa_group_id(src_tsk);
  __entry->src_cpu = src_cpu;
  __entry->src_nid = cpu_to_node(src_cpu);
- __entry->dst_pid = task_pid_nr(dst_tsk);
- __entry->dst_tgid = task_tgid_nr(dst_tsk);
- __entry->dst_ngid = task_numa_group_id(dst_tsk);
+ __entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0;
+ __entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
+ __entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
  __entry->dst_cpu = dst_cpu;
- __entry->dst_nid = cpu_to_node(dst_cpu);
+ __entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
  ),
 
  TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
@@ -591,6 +589,23 @@
  __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
  __entry->dst_cpu, __entry->dst_nid)
 );
+
+DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,
+
+ TP_PROTO(struct task_struct *src_tsk, int src_cpu,
+ struct task_struct *dst_tsk, int dst_cpu),
+
+ TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
+);
+
+DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
+
+ TP_PROTO(struct task_struct *src_tsk, int src_cpu,
+ struct task_struct *dst_tsk, int dst_cpu),
+
+ TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
+);
+
 
 /*
  * Tracepoint for waking a polling cpu without an IPI.
@@ -612,423 +627,56 @@
  TP_printk("cpu=%d", __entry->cpu)
 );
 
-#ifdef CONFIG_SMP
-#ifdef CREATE_TRACE_POINTS
-static inline
-int __trace_sched_cpu(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-#ifdef CONFIG_FAIR_GROUP_SCHED
- struct rq *rq = cfs_rq ? cfs_rq->rq : NULL;
-#else
- struct rq *rq = cfs_rq ? container_of(cfs_rq, struct rq, cfs) : NULL;
-#endif
- return rq ? cpu_of(rq)
- : task_cpu((container_of(se, struct task_struct, se)));
-}
-
-static inline
-int __trace_sched_path(struct cfs_rq *cfs_rq, char *path, int len)
-{
-#ifdef CONFIG_FAIR_GROUP_SCHED
- int l = path ? len : 0;
-
- if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
- return autogroup_path(cfs_rq->tg, path, l) + 1;
- else if (cfs_rq && cfs_rq->tg->css.cgroup)
- return cgroup_path(cfs_rq->tg->css.cgroup, path, l) + 1;
-#endif
- if (path)
- strcpy(path, "(null)");
-
- return strlen("(null)");
-}
-
-static inline
-struct cfs_rq *__trace_sched_group_cfs_rq(struct sched_entity *se)
-{
-#ifdef CONFIG_FAIR_GROUP_SCHED
- return se->my_q;
-#else
- return NULL;
-#endif
-}
-#endif /* CREATE_TRACE_POINTS */
-
 /*
- * Tracepoint for cfs_rq load tracking:
+ * Following tracepoints are not exported in tracefs and provide hooking
+ * mechanisms only for testing and debugging purposes.
+ *
+ * Postfixed with _tp to make them easily identifiable in the code.
  */
-TRACE_EVENT(sched_load_cfs_rq,
-
+DECLARE_TRACE(pelt_cfs_tp,
  TP_PROTO(struct cfs_rq *cfs_rq),
+ TP_ARGS(cfs_rq));
 
- TP_ARGS(cfs_rq),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __dynamic_array(char, path,
- __trace_sched_path(cfs_rq, NULL, 0) )
- __field( unsigned long, load )
- __field( unsigned long, rbl_load )
- __field( unsigned long, util )
- ),
-
- TP_fast_assign(
- __entry->cpu = __trace_sched_cpu(cfs_rq, NULL);
- __trace_sched_path(cfs_rq, __get_dynamic_array(path),
- __get_dynamic_array_len(path));
- __entry->load = cfs_rq->avg.load_avg;
- __entry->rbl_load = cfs_rq->avg.runnable_load_avg;
- __entry->util = cfs_rq->avg.util_avg;
- ),
-
- TP_printk("cpu=%d path=%s load=%lu rbl_load=%lu util=%lu",
- __entry->cpu, __get_str(path), __entry->load,
- __entry->rbl_load,__entry->util)
-);
-
-/*
- * Tracepoint for rt_rq load tracking:
- */
-struct rq;
-TRACE_EVENT(sched_load_rt_rq,
-
+DECLARE_TRACE(pelt_rt_tp,
  TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
 
- TP_ARGS(rq),
+DECLARE_TRACE(pelt_dl_tp,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
 
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( unsigned long, util )
- ),
+DECLARE_TRACE(pelt_thermal_tp,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
 
- TP_fast_assign(
- __entry->cpu = rq->cpu;
- __entry->util = rq->avg_rt.util_avg;
- ),
+DECLARE_TRACE(pelt_irq_tp,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
 
- TP_printk("cpu=%d util=%lu", __entry->cpu,
- __entry->util)
-);
-
-/*
- * Tracepoint for sched_entity load tracking:
- */
-TRACE_EVENT(sched_load_se,
-
+DECLARE_TRACE(pelt_se_tp,
  TP_PROTO(struct sched_entity *se),
+ TP_ARGS(se));
 
- TP_ARGS(se),
+DECLARE_TRACE(sched_cpu_capacity_tp,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
 
- TP_STRUCT__entry(
- __field( int, cpu )
- __dynamic_array(char, path,
- __trace_sched_path(__trace_sched_group_cfs_rq(se), NULL, 0) )
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( unsigned long, load )
- __field( unsigned long, rbl_load )
- __field( unsigned long, util )
- ),
+DECLARE_TRACE(sched_overutilized_tp,
+ TP_PROTO(struct root_domain *rd, bool overutilized),
+ TP_ARGS(rd, overutilized));
 
- TP_fast_assign(
- struct cfs_rq *gcfs_rq = __trace_sched_group_cfs_rq(se);
- struct task_struct *p = gcfs_rq ? NULL
- : container_of(se, struct task_struct, se);
-
- __entry->cpu = __trace_sched_cpu(gcfs_rq, se);
- __trace_sched_path(gcfs_rq, __get_dynamic_array(path),
- __get_dynamic_array_len(path));
- memcpy(__entry->comm, p ? p->comm : "(null)",
- p ? TASK_COMM_LEN : sizeof("(null)"));
- __entry->pid = p ? p->pid : -1;
- __entry->load = se->avg.load_avg;
- __entry->rbl_load = se->avg.runnable_load_avg;
- __entry->util = se->avg.util_avg;
- ),
-
- TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu",
- __entry->cpu, __get_str(path), __entry->comm, __entry->pid,
- __entry->load, __entry->rbl_load, __entry->util)
-);
-
-/*
- * Tracepoint for task_group load tracking:
- */
-#ifdef CONFIG_FAIR_GROUP_SCHED
-TRACE_EVENT(sched_load_tg,
-
+DECLARE_TRACE(sched_util_est_cfs_tp,
  TP_PROTO(struct cfs_rq *cfs_rq),
+ TP_ARGS(cfs_rq));
 
- TP_ARGS(cfs_rq),
+DECLARE_TRACE(sched_util_est_se_tp,
+ TP_PROTO(struct sched_entity *se),
+ TP_ARGS(se));
 
- TP_STRUCT__entry(
- __field( int, cpu )
- __dynamic_array(char, path,
- __trace_sched_path(cfs_rq, NULL, 0) )
- __field( long, load )
- ),
+DECLARE_TRACE(sched_update_nr_running_tp,
+ TP_PROTO(struct rq *rq, int change),
+ TP_ARGS(rq, change));
 
- TP_fast_assign(
- __entry->cpu = cfs_rq->rq->cpu;
- __trace_sched_path(cfs_rq, __get_dynamic_array(path),
- __get_dynamic_array_len(path));
- __entry->load = atomic_long_read(&cfs_rq->tg->load_avg);
- ),
-
- TP_printk("cpu=%d path=%s load=%ld", __entry->cpu, __get_str(path),
- __entry->load)
-);
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-
-/*
- * Tracepoint for tasks' estimated utilization.
- */
-TRACE_EVENT(sched_util_est_task,
-
- TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
-
- TP_ARGS(tsk, avg),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, cpu )
- __field( unsigned int, util_avg )
- __field( unsigned int, est_enqueued )
- __field( unsigned int, est_ewma )
-
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->cpu = task_cpu(tsk);
- __entry->util_avg = avg->util_avg;
- __entry->est_enqueued = avg->util_est.enqueued;
- __entry->est_ewma = avg->util_est.ewma;
- ),
-
- TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u",
- __entry->comm,
- __entry->pid,
- __entry->cpu,
- __entry->util_avg,
- __entry->est_ewma,
- __entry->est_enqueued)
-);
-
-/*
- * Tracepoint for root cfs_rq's estimated utilization.
- */
-TRACE_EVENT(sched_util_est_cpu,
-
- TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
-
- TP_ARGS(cpu, cfs_rq),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( unsigned int, util_avg )
- __field( unsigned int, util_est_enqueued )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->util_avg = cfs_rq->avg.util_avg;
- __entry->util_est_enqueued = cfs_rq->avg.util_est.enqueued;
- ),
-
- TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u",
- __entry->cpu,
- __entry->util_avg,
- __entry->util_est_enqueued)
-);
-
-/*
- * Tracepoint for find_best_target
- */
-TRACE_EVENT(sched_find_best_target,
-
- TP_PROTO(struct task_struct *tsk, bool prefer_idle,
- unsigned long min_util, int best_idle, int best_active,
- int target, int backup),
-
- TP_ARGS(tsk, prefer_idle, min_util, best_idle,
- best_active, target, backup),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( unsigned long, min_util )
- __field( bool, prefer_idle )
- __field( int, best_idle )
- __field( int, best_active )
- __field( int, target )
- __field( int, backup )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->min_util = min_util;
- __entry->prefer_idle = prefer_idle;
- __entry->best_idle = best_idle;
- __entry->best_active = best_active;
- __entry->target = target;
- __entry->backup = backup;
- ),
-
- TP_printk("pid=%d comm=%s prefer_idle=%d "
- "best_idle=%d best_active=%d target=%d backup=%d",
- __entry->pid, __entry->comm, __entry->prefer_idle,
- __entry->best_idle, __entry->best_active,
- __entry->target, __entry->backup)
-);
-
-/*
- * Tracepoint for accounting CPU boosted utilization
- */
-TRACE_EVENT(sched_boost_cpu,
-
- TP_PROTO(int cpu, unsigned long util, long margin),
-
- TP_ARGS(cpu, util, margin),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( unsigned long, util )
- __field(long, margin )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->util = util;
- __entry->margin = margin;
- ),
-
- TP_printk("cpu=%d util=%lu margin=%ld",
- __entry->cpu,
- __entry->util,
- __entry->margin)
-);
-
-/*
- * Tracepoint for schedtune_tasks_update
- */
-TRACE_EVENT(sched_tune_tasks_update,
-
- TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
- int boost, int max_boost, u64 group_ts),
-
- TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost, group_ts),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, cpu )
- __field( int, tasks )
- __field( int, idx )
- __field( int, boost )
- __field( int, max_boost )
- __field( u64, group_ts )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->cpu = cpu;
- __entry->tasks = tasks;
- __entry->idx = idx;
- __entry->boost = boost;
- __entry->max_boost = max_boost;
- __entry->group_ts = group_ts;
- ),
-
- TP_printk("pid=%d comm=%s "
- "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d timeout=%llu",
- __entry->pid, __entry->comm,
- __entry->cpu, __entry->tasks, __entry->idx,
- __entry->boost, __entry->max_boost,
- __entry->group_ts)
-);
-
-/*
- * Tracepoint for schedtune_boostgroup_update
- */
-TRACE_EVENT(sched_tune_boostgroup_update,
-
- TP_PROTO(int cpu, int variation, int max_boost),
-
- TP_ARGS(cpu, variation, max_boost),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, variation )
- __field( int, max_boost )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->variation = variation;
- __entry->max_boost = max_boost;
- ),
-
- TP_printk("cpu=%d variation=%d max_boost=%d",
- __entry->cpu, __entry->variation, __entry->max_boost)
-);
-
-/*
- * Tracepoint for accounting task boosted utilization
- */
-TRACE_EVENT(sched_boost_task,
-
- TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),
-
- TP_ARGS(tsk, util, margin),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( unsigned long, util )
- __field( long, margin )
-
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->util = util;
- __entry->margin = margin;
- ),
-
- TP_printk("comm=%s pid=%d util=%lu margin=%ld",
- __entry->comm, __entry->pid,
- __entry->util,
- __entry->margin)
-);
-
-/*
- * Tracepoint for system overutilized flag
-*/
-TRACE_EVENT(sched_overutilized,
-
- TP_PROTO(int overutilized),
-
- TP_ARGS(overutilized),
-
- TP_STRUCT__entry(
- __field( int, overutilized )
- ),
-
- TP_fast_assign(
- __entry->overutilized = overutilized;
- ),
-
- TP_printk("overutilized=%d",
- __entry->overutilized)
-);
-
-#endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
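The bare DECLARE_TRACE() hooks introduced above do not show up in tracefs; a module attaches to them through the register_trace_<name>() / unregister_trace_<name>() helpers that DECLARE_TRACE() generates. A sketch of such a probe module, assuming the target kernel exports these tracepoints to GPL modules (as mainline does from the scheduler core); the names pelt_se_probe and pelt_probe_* are illustrative only:

/*
 * Sketch of an out-of-tree probe module for the bare pelt_se_tp hook above.
 * pelt_se_probe / pelt_probe_* are made-up names for this sketch; the
 * register_trace_pelt_se_tp() / unregister_trace_pelt_se_tp() helpers are
 * generated by DECLARE_TRACE(pelt_se_tp, ...).
 */
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

static atomic64_t pelt_se_hits = ATOMIC64_INIT(0);

/* Probe signature: a void *cookie first, then the TP_PROTO arguments. */
static void pelt_se_probe(void *cookie, struct sched_entity *se)
{
	atomic64_inc(&pelt_se_hits);
}

static int __init pelt_probe_init(void)
{
	/* The second argument is the cookie passed back as the probe's first arg. */
	return register_trace_pelt_se_tp(pelt_se_probe, NULL);
}

static void __exit pelt_probe_exit(void)
{
	unregister_trace_pelt_se_tp(pelt_se_probe, NULL);
	tracepoint_synchronize_unregister();
	pr_info("pelt_se_tp fired %lld times\n",
		(long long)atomic64_read(&pelt_se_hits));
}

module_init(pelt_probe_init);
module_exit(pelt_probe_exit);
MODULE_LICENSE("GPL");

Because these hooks are not exported in tracefs, a helper module of this kind (or BPF raw tracepoint attachment) is the only way to observe them; the probe above merely counts invocations, keeping the sketch independent of scheduler-internal structures.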