Commit 8255309b authored by Linus Torvalds

Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing/filters: return proper error code when writing filter file
  tracing/filters: allow user input integer to be oct or hex
  tracing/filters: fix NULL pointer dereference
  tracing/filters: NIL-terminate user input filter
  ftrace: Output REC->var instead of __entry->var for trace format
  Make __stringify support variable argument macros too
  tracing: fix document references
  tracing: fix splice return too large
  tracing: update file->f_pos when splice(2) it
  tracing: allocate page when needed
  tracing: disable seeking for trace_pipe_raw
parents bf20753c 44e9c8b7
@@ -6,7 +6,7 @@
  * converts to "bar".
  */
-#define __stringify_1(x) #x
-#define __stringify(x) __stringify_1(x)
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
 
 #endif /* !__LINUX_STRINGIFY_H */
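
Not part of the patch: a minimal userspace sketch of what the variadic form buys. The old single-parameter __stringify() cannot take a comma-separated list at all; with x... the whole list is macro-expanded first and only then stringified, which is what the TP_printk() change at the bottom of this diff depends on.

#include <stdio.h>

/* Same two-level trick as include/linux/stringify.h, variadic form. */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define FOO bar

int main(void)
{
	/*
	 * The old __stringify(x) would reject a comma-separated list;
	 * the variadic form expands it first, then stringifies, so this
	 * prints:  bar, bar + 1
	 */
	printf("%s\n", __stringify(FOO, FOO + 1));
	return 0;
}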
@@ -312,7 +312,7 @@ config KMEMTRACE
 	  and profile kernel code.
 
 	  This requires an userspace application to use. See
-	  Documentation/vm/kmemtrace.txt for more information.
+	  Documentation/trace/kmemtrace.txt for more information.
 
 	  Saying Y will make the kernel somewhat larger and slower. However,
 	  if you disable kmemtrace at run-time or boot-time, the performance
@@ -403,7 +403,7 @@ config MMIOTRACE
 	  implementation and works via page faults. Tracing is disabled by
 	  default and can be enabled at run-time.
 
-	  See Documentation/tracers/mmiotrace.txt.
+	  See Documentation/trace/mmiotrace.txt.
 	  If you are not helping to develop drivers, say N.
 
 config MMIOTRACE_TEST
......
@@ -3277,19 +3277,13 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 
 	info->tr = &global_trace;
 	info->cpu = cpu;
-	info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
+	info->spare = NULL;
 	/* Force reading ring buffer for first read */
 	info->read = (unsigned int)-1;
-	if (!info->spare)
-		goto out;
 
 	filp->private_data = info;
 
-	return 0;
-
- out:
-	kfree(info);
-	return -ENOMEM;
+	return nonseekable_open(inode, filp);
 }
 
 static ssize_t
@@ -3304,6 +3298,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (!count)
 		return 0;
 
+	if (!info->spare)
+		info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
+	if (!info->spare)
+		return -ENOMEM;
+
 	/* Do we have previous read data to read? */
 	if (info->read < PAGE_SIZE)
 		goto read;
@@ -3342,6 +3341,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 {
 	struct ftrace_buffer_info *info = file->private_data;
 
-	ring_buffer_free_read_page(info->tr->buffer, info->spare);
+	if (info->spare)
+		ring_buffer_free_read_page(info->tr->buffer, info->spare);
 	kfree(info);
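
Not from the patch: the open/read/release hunks above switch the per-reader spare page to allocate-on-first-use, and open() now returns through nonseekable_open() to refuse lseek(). A rough userspace sketch of the same pattern, with malloc()/free() standing in for ring_buffer_alloc_read_page()/ring_buffer_free_read_page():

#include <errno.h>
#include <stdlib.h>

struct reader {
	void *spare;		/* stays NULL until a read needs it */
};

void reader_open(struct reader *r)
{
	r->spare = NULL;	/* was: page allocated here even if never read */
}

int reader_read(struct reader *r)
{
	if (!r->spare)
		r->spare = malloc(4096);
	if (!r->spare)
		return -ENOMEM;	/* fail this read, not the open */
	/* ... fill r->spare from the buffer and copy it out ... */
	return 0;
}

void reader_release(struct reader *r)
{
	if (r->spare)		/* may never have been allocated */
		free(r->spare);
}

int main(void)
{
	struct reader r;

	reader_open(&r);
	reader_read(&r);	/* first read allocates */
	reader_read(&r);	/* later reads reuse the page */
	reader_release(&r);
	return 0;
}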
@@ -3428,14 +3428,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	int size, i;
 	size_t ret;
 
-	/*
-	 * We can't seek on a buffer input
-	 */
-	if (unlikely(*ppos))
-		return -ESPIPE;
+	if (*ppos & (PAGE_SIZE - 1)) {
+		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
+		return -EINVAL;
+	}
+
+	if (len & (PAGE_SIZE - 1)) {
+		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
+		if (len < PAGE_SIZE)
+			return -EINVAL;
+		len &= PAGE_MASK;
+	}
 
-	for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) {
+	for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) {
 		struct page *page;
 		int r;
@@ -3474,6 +3479,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		spd.partial[i].offset = 0;
 		spd.partial[i].private = (unsigned long)ref;
 		spd.nr_pages++;
+		*ppos += PAGE_SIZE;
 	}
 
 	spd.nr_pages = i;
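
Not from the patch: a rough userspace illustration of the contract these checks enforce for trace_pipe_raw. splice(2) lengths are expected in whole pages (unaligned lengths are trimmed, or rejected when smaller than a page, with a one-time warning), and since the file position now advances by PAGE_SIZE per spliced page, repeated calls keep reading forward instead of re-reading the same data. The debugfs path below is an example, not taken from the patch.

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Example path; per-cpu raw buffer under debugfs. */
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY);
	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	int pfd[2];
	long page = sysconf(_SC_PAGESIZE);

	if (fd < 0 || out < 0 || pipe(pfd) < 0)
		return 1;

	for (;;) {
		/* Request whole pages; NULL off_in means the kernel's
		 * file position is used and advanced for us. */
		ssize_t n = splice(fd, NULL, pfd[1], NULL, 4 * page,
				   SPLICE_F_NONBLOCK);
		if (n <= 0)
			break;
		splice(pfd[0], NULL, out, NULL, n, 0);	/* drain to the file */
	}
	return 0;
}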
......
@@ -503,6 +503,7 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (copy_from_user(&buf, ubuf, cnt))
 		return -EFAULT;
+	buf[cnt] = '\0';
 
 	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
 	if (!pred)
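
Not from the patch, just the reasoning behind buf[cnt] = '\0': copy_from_user() copies exactly cnt bytes and never terminates the buffer, so the later strsep()/strcmp() parsing could scan past the end of the on-stack array. A userspace sketch of the same idiom, with read() in place of copy_from_user() and MAX_INPUT as a placeholder size:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MAX_INPUT 128	/* placeholder for the handler's buffer size */

int main(void)
{
	char buf[MAX_INPUT + 1];
	ssize_t cnt = read(STDIN_FILENO, buf, MAX_INPUT);

	if (cnt <= 0)
		return 1;

	/*
	 * read()/copy_from_user() hand back raw bytes, not a C string;
	 * without this terminator, strsep() below would keep scanning
	 * past the end of the buffer looking for a '\0'.
	 */
	buf[cnt] = '\0';

	char *p = buf, *tok;
	while ((tok = strsep(&p, " \n")) != NULL)
		if (*tok)
			printf("token: %s\n", tok);
	return 0;
}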
@@ -520,9 +521,10 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		return cnt;
 	}
 
-	if (filter_add_pred(call, pred)) {
+	err = filter_add_pred(call, pred);
+	if (err < 0) {
 		filter_free_pred(pred);
-		return -EINVAL;
+		return err;
 	}
 
 	*ppos += cnt;
@@ -569,6 +571,7 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (copy_from_user(&buf, ubuf, cnt))
 		return -EFAULT;
+	buf[cnt] = '\0';
 
 	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
 	if (!pred)
@@ -586,10 +589,11 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		return cnt;
 	}
 
-	if (filter_add_subsystem_pred(system, pred)) {
+	err = filter_add_subsystem_pred(system, pred);
+	if (err < 0) {
 		filter_free_subsystem_preds(system);
 		filter_free_pred(pred);
-		return -EINVAL;
+		return err;
 	}
 
 	*ppos += cnt;
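
An aside, not from the patch: with the real error code propagated instead of a blanket -EINVAL, a writer to the filter files can now distinguish a full predicate table (-ENOSPC, see the next hunks) or an allocation failure from a genuinely malformed expression. A hypothetical caller, assuming an irq_handler_entry event with an irq field; the path and expression are illustrative only:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/tracing/events/irq/irq_handler_entry/filter";
	const char *expr = "irq == 0x13\n";	/* hex now parses (base 0) */
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return 1;

	if (write(fd, expr, strlen(expr)) < 0) {
		if (errno == ENOSPC)
			fprintf(stderr, "filter: predicate table full\n");
		else if (errno == EINVAL)
			fprintf(stderr, "filter: malformed expression\n");
		else
			perror("write");
	}
	close(fd);
	return 0;
}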
......
@@ -215,7 +215,7 @@ static int __filter_add_pred(struct ftrace_event_call *call,
 		}
 	}
 
-	return -ENOMEM;
+	return -ENOSPC;
 }
 
 static int is_string_field(const char *type)
@@ -319,7 +319,7 @@ int filter_add_subsystem_pred(struct event_subsystem *system,
 	}
 
 	if (i == MAX_FILTER_PRED)
-		return -EINVAL;
+		return -ENOSPC;
 
 	events_for_each(call) {
 		int err;
@@ -410,16 +410,22 @@ int filter_parse(char **pbuf, struct filter_pred *pred)
 		}
 	}
 
+	if (!val_str) {
+		pred->field_name = NULL;
+		return -EINVAL;
+	}
+
 	pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
 	if (!pred->field_name)
 		return -ENOMEM;
 
-	pred->val = simple_strtoull(val_str, &tmp, 10);
+	pred->val = simple_strtoull(val_str, &tmp, 0);
 	if (tmp == val_str) {
 		pred->str_val = kstrdup(val_str, GFP_KERNEL);
 		if (!pred->str_val)
 			return -ENOMEM;
-	}
+	} else if (*tmp != '\0')
+		return -EINVAL;
 
 	return 0;
 }
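
Not part of the patch: base 0 gives simple_strtoull() the usual C-literal rules (leading 0x is hex, leading 0 is octal), and the new else-if rejects numbers followed by trailing junk instead of silently treating the whole token as a string. Userspace strtoull() behaves the same way:

#include <stdio.h>
#include <stdlib.h>

static void parse(const char *val_str)
{
	char *tmp;
	unsigned long long val = strtoull(val_str, &tmp, 0);	/* base 0 */

	if (tmp == val_str)
		printf("%-6s -> not a number, keep as string value\n", val_str);
	else if (*tmp != '\0')
		printf("%-6s -> trailing garbage, reject (-EINVAL)\n", val_str);
	else
		printf("%-6s -> %llu\n", val_str, val);
}

int main(void)
{
	parse("16");	/* decimal              -> 16 */
	parse("0x10");	/* hex, accepted by 0   -> 16 */
	parse("020");	/* octal, accepted by 0 -> 16 */
	parse("bash");	/* kept as a string value     */
	parse("16kb");	/* now rejected               */
	return 0;
}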
......
@@ -105,10 +105,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 	return 0;
 
 #undef __entry
-#define __entry "REC"
+#define __entry REC
 
 #undef TP_printk
-#define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args
+#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
 
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
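
For illustration only, not from the patch: with __entry expanding to the bare token REC and the argument list run through the variadic __stringify(), a TP_printk() ends up in the event's "print fmt:" line as REC-> references, which is what userspace format parsers key on. The event fields below are made up.

#include <stdio.h>

#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#undef __entry
#define __entry REC

/* Same shape as the stage-2 definition above. */
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

int main(void)
{
	/*
	 * Prints:  "irq=%d handler=%s", REC->irq, REC->name
	 * With the old #args it would have shown __entry->irq, __entry->name.
	 */
	printf(TP_printk("irq=%d handler=%s", __entry->irq, __entry->name));
	return 0;
}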
......