/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

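/*
 * Fields shared by every event (see struct trace_entry).  They are
 * registered for each event with a "common_" prefix so that filters
 * and the format files can refer to them like any other field.
 */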
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

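/*
 * trace_event_raw_init - generic event initialization
 *
 * Registers the event with the trace output layer to obtain an id and
 * initializes the (empty) list of fields.  Returns -ENODEV if no id
 * could be allocated.
 */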
int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(call->event);
	if (!id)
		return -ENODEV;
	call->id = id;
	INIT_LIST_HEAD(&call->fields);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

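/*
 * Enable (1) or disable (0) a single event.  Enabling starts cmdline
 * recording and registers the event's tracepoint probe via ->regfunc;
 * disabling does the reverse via ->unregfunc.
 */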
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call);
		}
		break;
	case 1:
		if (!call->enabled) {
			tracing_start_cmdline_record();
			ret = call->regfunc(call);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->enabled = 1;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
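	/*
	 * For example (as written to the set_event file):
	 *   "sched:sched_switch"  - a single event
	 *   "irq:"                - every event in the irq subsystem
	 *   "sched"               - anything named sched, event or subsystem
	 */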

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

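/*
 * Handle writes to the set_event file: the user string is collected
 * with the trace_parser helpers, a leading '!' means "disable", and
 * the result is handed to ftrace_set_clr_event().
 */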
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

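/*
 * A system's "enable" file reports the aggregate state of its events:
 * '0' if all are disabled, '1' if all are enabled, 'X' for a mixture.
 */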
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

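/*
 * The per-event "format" file: print the event name and ID, the field
 * descriptions (the common fields first, separated from the event's
 * own fields by a blank line) and the print format, in the layout
 * user-space trace parsers expect.
 */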
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct ftrace_event_field *field;
	struct trace_seq *s;
	int common_field_count = 5;
	char *buf;
	int r = 0;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");

	list_for_each_entry_reverse(field, &call->fields, link) {
		/*
		 * Smartly shows the array type (except dynamic array).
		 * Normal:
		 *	field:TYPE VAR
		 * If TYPE := TYPE[LEN], it is shown:
		 *	field:TYPE VAR[LEN]
		 */
		const char *array_descriptor = strchr(field->type, '[');

		if (!strncmp(field->type, "__data_loc", 10))
			array_descriptor = NULL;

		if (!array_descriptor) {
			r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					field->type, field->name, field->offset,
					field->size, !!field->is_signed);
		} else {
			r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					(int)(array_descriptor - field->type),
					field->type, field->name,
					array_descriptor, field->offset,
					field->size, !!field->is_signed);
		}

		if (--common_field_count == 0)
			r = trace_seq_printf(s, "\n");

		if (!r)
			break;
	}

	if (r)
		r = trace_seq_printf(s, "\nprint fmt: %s\n",
				call->print_fmt);

	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

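/*
 * Writes to a per-event "filter" file replace that event's filter:
 * the string is copied from user space and handed to
 * apply_event_filter(), whose error (if any) is returned to the
 * writer.
 */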
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

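/*
 * Look up (or create) the events/<system> directory.  A newly created
 * subsystem also gets its "filter" and "enable" files; on failure the
 * top-level d_events directory is returned so the event files still
 * land somewhere sensible.
 */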
static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}

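/*
 * Create the directory for one event and populate it: "enable" (if
 * the event has a regfunc), "id" (if it has an id and a perf handler),
 * "filter" (if it defines fields) and "format".  The file_operations
 * are passed in so module events can use their own reference-counted
 * copies.
 */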
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

	if (call->id && call->perf_event_enable)
		trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = trace_define_common_fields(call);
		if (!ret)
			ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (!call->name)
		return -EINVAL;

	if (call->raw_init) {
		ret = call->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace "
					   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
			       &ftrace_enable_fops, &ftrace_event_filter_fops,
			       &ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);
	return ret;
}

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event)
		__unregister_ftrace_event(call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head	list;
	struct module		*mod;
	struct file_operations	id;
	struct file_operations	enable;
	struct file_operations	format;
	struct file_operations	filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

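/*
 * Module notifier: add the trace event directories for a module when
 * it is loaded (MODULE_STATE_COMING) and tear them down again when it
 * is unloaded (MODULE_STATE_GOING).
 */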
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif