/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
7#include <linux/module.h>
8#include <linux/fs.h>
9#include <linux/debugfs.h>
10#include <linux/kallsyms.h>
11#include <linux/uaccess.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020012#include <linux/ftrace.h>
Steven Rostedtad8d75f2009-04-14 19:39:12 -040013#include <trace/events/sched.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020014
15#include "trace.h"
16
17static struct trace_array *ctx_trace;
18static int __read_mostly tracer_enabled;
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +010019static int sched_ref;
20static DEFINE_MUTEX(sched_register_mutex);
Steven Rostedt5fec6dd2009-03-17 19:59:53 -040021static int sched_stopped;
Steven Rostedt35e8e302008-05-12 21:20:42 +020022
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020023
24void
25tracing_sched_switch_trace(struct trace_array *tr,
26 struct task_struct *prev,
27 struct task_struct *next,
28 unsigned long flags, int pc)
29{
30 struct ftrace_event_call *call = &event_context_switch;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -050031 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020032 struct ring_buffer_event *event;
33 struct ctx_switch_entry *entry;
34
Steven Rostedte77405a2009-09-02 14:17:06 -040035 event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020036 sizeof(*entry), flags, pc);
37 if (!event)
38 return;
39 entry = ring_buffer_event_data(event);
40 entry->prev_pid = prev->pid;
41 entry->prev_prio = prev->prio;
42 entry->prev_state = prev->state;
43 entry->next_pid = next->pid;
44 entry->next_prio = next->prio;
45 entry->next_state = next->state;
46 entry->next_cpu = task_cpu(next);
47
Tom Zanussif306cc82013-10-24 08:34:17 -050048 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedte77405a2009-09-02 14:17:06 -040049 trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020050}
51
Ingo Molnare309b412008-05-12 21:20:51 +020052static void
Steven Rostedt38516ab2010-04-20 17:04:50 -040053probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
Steven Rostedt35e8e302008-05-12 21:20:42 +020054{
Steven Rostedt35e8e302008-05-12 21:20:42 +020055 struct trace_array_cpu *data;
56 unsigned long flags;
Steven Rostedt35e8e302008-05-12 21:20:42 +020057 int cpu;
Steven Rostedt38697052008-10-01 13:14:09 -040058 int pc;
Steven Rostedt35e8e302008-05-12 21:20:42 +020059
Zhaoleidcef7882009-03-31 15:26:14 +080060 if (unlikely(!sched_ref))
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040061 return;
62
Steven Rostedt41bc8142008-05-22 11:49:22 -040063 tracing_record_cmdline(prev);
64 tracing_record_cmdline(next);
65
Zhaoleidcef7882009-03-31 15:26:14 +080066 if (!tracer_enabled || sched_stopped)
Steven Rostedt35e8e302008-05-12 21:20:42 +020067 return;
68
Steven Rostedt38697052008-10-01 13:14:09 -040069 pc = preempt_count();
Steven Rostedt18cef372008-05-12 21:20:44 +020070 local_irq_save(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020071 cpu = raw_smp_processor_id();
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -050072 data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
Steven Rostedt35e8e302008-05-12 21:20:42 +020073
Steven Rostedt3ea2e6d2008-10-04 02:01:00 -040074 if (likely(!atomic_read(&data->disabled)))
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -050075 tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
Steven Rostedt35e8e302008-05-12 21:20:42 +020076
Steven Rostedt18cef372008-05-12 21:20:44 +020077 local_irq_restore(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020078}
79
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020080void
81tracing_sched_wakeup_trace(struct trace_array *tr,
82 struct task_struct *wakee,
83 struct task_struct *curr,
84 unsigned long flags, int pc)
85{
86 struct ftrace_event_call *call = &event_wakeup;
87 struct ring_buffer_event *event;
88 struct ctx_switch_entry *entry;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -050089 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020090
Steven Rostedte77405a2009-09-02 14:17:06 -040091 event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020092 sizeof(*entry), flags, pc);
93 if (!event)
94 return;
95 entry = ring_buffer_event_data(event);
96 entry->prev_pid = curr->pid;
97 entry->prev_prio = curr->prio;
98 entry->prev_state = curr->state;
99 entry->next_pid = wakee->pid;
100 entry->next_prio = wakee->prio;
101 entry->next_state = wakee->state;
102 entry->next_cpu = task_cpu(wakee);
103
Tom Zanussif306cc82013-10-24 08:34:17 -0500104 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt0d5c6e12012-11-01 20:54:21 -0400105 trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker82e04af2009-07-29 18:00:29 +0200106}
107
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200108static void
Steven Rostedt38516ab2010-04-20 17:04:50 -0400109probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200110{
Ingo Molnar57422792008-05-12 21:20:51 +0200111 struct trace_array_cpu *data;
112 unsigned long flags;
Steven Rostedt38697052008-10-01 13:14:09 -0400113 int cpu, pc;
Ingo Molnar57422792008-05-12 21:20:51 +0200114
Zhaoleidcef7882009-03-31 15:26:14 +0800115 if (unlikely(!sched_ref))
116 return;
117
118 tracing_record_cmdline(current);
119
120 if (!tracer_enabled || sched_stopped)
Ingo Molnar57422792008-05-12 21:20:51 +0200121 return;
122
Steven Rostedt38697052008-10-01 13:14:09 -0400123 pc = preempt_count();
Ingo Molnar57422792008-05-12 21:20:51 +0200124 local_irq_save(flags);
125 cpu = raw_smp_processor_id();
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500126 data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
Ingo Molnar57422792008-05-12 21:20:51 +0200127
Steven Rostedt3ea2e6d2008-10-04 02:01:00 -0400128 if (likely(!atomic_read(&data->disabled)))
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -0500129 tracing_sched_wakeup_trace(ctx_trace, wakee, current,
Steven Rostedt38697052008-10-01 13:14:09 -0400130 flags, pc);
Ingo Molnar57422792008-05-12 21:20:51 +0200131
Ingo Molnar57422792008-05-12 21:20:51 +0200132 local_irq_restore(flags);
133}
134
/*
 * Attach all three scheduler tracepoint probes.  On failure the
 * probes registered so far are unwound in reverse order and the
 * error from the failing registration is returned.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
	return ret;
}
167
/* Detach the scheduler tracepoint probes, in reverse registration order. */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
174
Ingo Molnarf2252932008-05-22 10:37:48 +0200175static void tracing_start_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200176{
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100177 mutex_lock(&sched_register_mutex);
Steven Rostedte168e052008-11-07 22:36:02 -0500178 if (!(sched_ref++))
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200179 tracing_sched_register();
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100180 mutex_unlock(&sched_register_mutex);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200181}
182
Ingo Molnarf2252932008-05-22 10:37:48 +0200183static void tracing_stop_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200184{
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100185 mutex_lock(&sched_register_mutex);
Steven Rostedte168e052008-11-07 22:36:02 -0500186 if (!(--sched_ref))
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200187 tracing_sched_unregister();
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100188 mutex_unlock(&sched_register_mutex);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200189}
190
/*
 * Start recording pid -> comm mappings by taking a reference on the
 * sched tracepoints (cmdlines are recorded from the probes above).
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
195
/* Stop recording pid -> comm mappings; pairs with tracing_start_cmdline_record(). */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
200
Steven Rostedt75f5c472008-11-07 22:36:02 -0500201/**
Steven Rostedte168e052008-11-07 22:36:02 -0500202 * tracing_start_sched_switch_record - start tracing context switches
203 *
204 * Turns on context switch tracing for a tracer.
205 */
206void tracing_start_sched_switch_record(void)
207{
208 if (unlikely(!ctx_trace)) {
209 WARN_ON(1);
210 return;
211 }
212
213 tracing_start_sched_switch();
214
215 mutex_lock(&sched_register_mutex);
216 tracer_enabled++;
217 mutex_unlock(&sched_register_mutex);
218}
219
220/**
221 * tracing_stop_sched_switch_record - start tracing context switches
222 *
223 * Turns off context switch tracing for a tracer.
224 */
225void tracing_stop_sched_switch_record(void)
226{
227 mutex_lock(&sched_register_mutex);
228 tracer_enabled--;
229 WARN_ON(tracer_enabled < 0);
230 mutex_unlock(&sched_register_mutex);
231
232 tracing_stop_sched_switch();
233}
234
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
247