The builtin appraise policy appraises all files
owned by uid=0.
- ima_audit= [IMA]
- Format: { "0" | "1" }
- 0 -- integrity auditing messages. (Default)
- 1 -- enable informational integrity auditing messages.
-
ima_hash= [IMA]
Format: { "sha1" | "md5" }
default: "sha1"
inport.irq= [HW] Inport (ATI XL and Microsoft) busmouse driver
Format: <irq>
+ int_pln_enable [x86] Enable power limit notification interrupt
+
+ integrity_audit=[IMA]
+ Format: { "0" | "1" }
+ 0 -- basic integrity auditing messages. (Default)
+ 1 -- additional integrity auditing messages.
+
intel_iommu= [DMAR] Intel IOMMU driver (DMAR) option
on
Enable intel iommu driver.
* dump_id: dump IDENTIFY data.
+ * atapi_dmadir: Enable ATAPI DMADIR bridge support
+
If there are multiple matching configurations changing
the same attribute, the last one is used.
Run specified binary instead of /init from the ramdisk,
used for early userspace startup. See initrd.
- reboot= [BUGS=X86-32,BUGS=ARM,BUGS=IA-64] Rebooting mode
- Format: <reboot_mode>[,<reboot_mode2>[,...]]
- See arch/*/kernel/reboot.c or arch/*/kernel/process.c
+ reboot= [KNL]
+ Format (x86 or x86_64):
+ [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
+ [[,]s[mp]####] \
+ [[,]b[ios] | a[cpi] | k[bd] | t[riple] | e[fi] | p[ci]] \
+ [[,]f[orce]]
+ Where reboot_mode is one of warm (soft) or cold (hard) or gpio,
+ reboot_type is one of bios, acpi, kbd, triple, efi, or pci,
+ reboot_force is either force or not specified,
+ reboot_cpu is s[mp]#### with #### being the processor
+ to be used for rebooting.
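+ For example, "reboot=warm" selects a warm reboot, and
+ "reboot=acpi,force" selects the ACPI reboot type and
+ also sets the force flag.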
relax_domain_level=
[KNL, SMP] Set scheduler's default relax_domain_level.
See also Documentation/trace/ftrace.txt "trace options"
section.
+ traceoff_on_warning
+ [FTRACE] enable this option to disable tracing when a
+ warning is hit. This turns off "tracing_on". Tracing can
+ be enabled again by echoing '1' into the "tracing_on"
+ file located in /sys/kernel/debug/tracing/
+
+ This option is useful, as it disables the trace before
+ the WARNING dump is printed, preventing the trace from
+ being filled with content caused by the warning output.
+
+ This option can also be set at run time via the sysctl
+ option: kernel/traceoff_on_warning
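+
+ For example, at run time:
+ echo 1 > /proc/sys/kernel/traceoff_on_warning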
+
transparent_hugepage=
[KNL]
Format: [always|madvise|never]
video= [FB] Frame buffer configuration
See Documentation/fb/modedb.txt.
+ video.brightness_switch_enabled= [0,1]
+ If set to 1, on receiving an ACPI notify event
+ generated by a hotkey, the video driver adjusts the
+ brightness level and then sends the event to user space
+ through the allocated input device; if set to 0, the
+ video driver only sends out the event without touching
+ the backlight brightness level.
+ default: 1
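+ For example, video.brightness_switch_enabled=0 leaves
+ the backlight level untouched and only forwards the
+ hotkey event to user space.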
+
virtio_mmio.device=
[VMMIO] Memory mapped virtio (platform) device.
that this also can be controlled per-workqueue for
workqueues visible under /sys/bus/workqueue/.
+ workqueue.power_efficient
+ Per-cpu workqueues are generally preferred because
+ they show better performance thanks to cache
+ locality; unfortunately, per-cpu workqueues tend to
+ be more power hungry than unbound workqueues.
+
+ Enabling this turns the per-cpu workqueues that were
+ observed to contribute significantly to power
+ consumption into unbound workqueues, leading to
+ measurably lower power usage at the cost of a small
+ performance overhead.
+
+ The default value of this parameter is determined by
+ the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
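+
+ For example, booting with workqueue.power_efficient=1
+ enables this behaviour even when
+ CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set.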
+
x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
default x2apic cluster mode on platforms
supporting x2apic.
plus one apbt timer for broadcast timer.
x86_mrst_timer=apbt_only | lapic_and_apbt
- xd= [HW,XT] Original XT pre-IDE (RLL encoded) disks.
- xd_geo= See header of drivers/block/xd.c.
-
xen_emul_unplug= [HW,X86,XEN]
Unplug Xen emulated devices
Format: [unplug0,][unplug1]
enum ftrace_dump_mode ftrace_dump_on_oops;
+ /* When set, tracing will stop when a WARN*() is hit */
+ int __disable_trace_on_warning;
+
static int tracing_set_tracer(const char *buf);
#define MAX_TRACER_SIZE 100
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
+ static int __init stop_trace_on_warning(char *str)
+ {
+ __disable_trace_on_warning = 1;
+ return 1;
+ }
+ __setup("traceoff_on_warning=", stop_trace_on_warning);
+
static int __init boot_alloc_snapshot(char *str)
{
allocate_snapshot = true;
}
__setup("trace_options=", set_trace_boot_options);
+
unsigned long long ns2usecs(cycle_t nsec)
{
nsec += 500;
LIST_HEAD(ftrace_trace_arrays);
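+ /*
+ * Take a reference on a trace_array, but only if it is still on the
+ * global ftrace_trace_arrays list. Returns 0 on success, -ENODEV if
+ * the array has already gone away.
+ */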
+ int trace_array_get(struct trace_array *this_tr)
+ {
+ struct trace_array *tr;
+ int ret = -ENODEV;
+
+ mutex_lock(&trace_types_lock);
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (tr == this_tr) {
+ tr->ref++;
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&trace_types_lock);
+
+ return ret;
+ }
+
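+ /* Drop a reference; the caller must hold trace_types_lock. */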
+ static void __trace_array_put(struct trace_array *this_tr)
+ {
+ WARN_ON(!this_tr->ref);
+ this_tr->ref--;
+ }
+
+ void trace_array_put(struct trace_array *this_tr)
+ {
+ mutex_lock(&trace_types_lock);
+ __trace_array_put(this_tr);
+ mutex_unlock(&trace_types_lock);
+ }
+
int filter_current_check_discard(struct ring_buffer *buffer,
struct ftrace_event_call *call, void *rec,
struct ring_buffer_event *event)
return ts;
}
+ /**
+ * tracing_is_enabled - Show whether the global trace is enabled
+ *
+ * Shows if the global trace has been enabled or not. It uses the
+ * mirror flag "buffer_disabled" to be used in fast paths such as for
+ * the irqsoff tracer. But it may be inaccurate due to races. If you
+ * need to know the accurate state, use tracing_is_on() which is a little
+ * slower, but accurate.
+ */
int tracing_is_enabled(void)
{
- return tracing_is_on();
+ /*
+ * For quick access (irqsoff uses this in fast path), just
+ * return the mirror variable of the state of the ring buffer.
+ * It's a little racy, but we don't really care.
+ */
+ smp_rmb();
+ return !global_trace.buffer_disabled;
}
/*
/*
* trace_types_lock is used to protect the trace_types list.
*/
- static DEFINE_MUTEX(trace_types_lock);
+ DEFINE_MUTEX(trace_types_lock);
/*
* serialize the access of the ring buffer
TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
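+ /* Enable recording in @tr's ring buffer and clear its mirror "disabled" flag. */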
+ static void tracer_tracing_on(struct trace_array *tr)
+ {
+ if (tr->trace_buffer.buffer)
+ ring_buffer_record_on(tr->trace_buffer.buffer);
+ /*
+ * This flag is looked at when buffers haven't been allocated
+ * yet, or by some tracers (like irqsoff) that just want to
+ * know if the ring buffer has been disabled, but can handle
+ * races where it gets disabled while a record is still made.
+ * As the check is in the fast path of the tracers, it is more
+ * important to be fast than accurate.
+ */
+ tr->buffer_disabled = 0;
+ /* Make the flag seen by readers */
+ smp_wmb();
+ }
+
/**
* tracing_on - enable tracing buffers
*
*/
void tracing_on(void)
{
- if (global_trace.trace_buffer.buffer)
- ring_buffer_record_on(global_trace.trace_buffer.buffer);
- /*
- * This flag is only looked at when buffers haven't been
- * allocated yet. We don't really care about the race
- * between setting this flag and actually turning
- * on the buffer.
- */
- global_trace.buffer_disabled = 0;
+ tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
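+ /* Disable recording in @tr's ring buffer and set its mirror "disabled" flag. */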
+ static void tracer_tracing_off(struct trace_array *tr)
+ {
+ if (tr->trace_buffer.buffer)
+ ring_buffer_record_off(tr->trace_buffer.buffer);
+ /*
+ * This flag is looked at when buffers haven't been allocated
+ * yet, or by some tracers (like irqsoff) that just want to
+ * know if the ring buffer has been disabled, but can handle
+ * races where it gets disabled while a record is still made.
+ * As the check is in the fast path of the tracers, it is more
+ * important to be fast than accurate.
+ */
+ tr->buffer_disabled = 1;
+ /* Make the flag seen by readers */
+ smp_wmb();
+ }
+
/**
* tracing_off - turn off tracing buffers
*
*/
void tracing_off(void)
{
- if (global_trace.trace_buffer.buffer)
- ring_buffer_record_off(global_trace.trace_buffer.buffer);
- /*
- * This flag is only looked at when buffers haven't been
- * allocated yet. We don't really care about the race
- * between setting this flag and actually turning
- * on the buffer.
- */
- global_trace.buffer_disabled = 1;
+ tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
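+ /* Turn tracing off when a warning is hit, if the user asked for it via traceoff_on_warning. */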
+ void disable_trace_on_warning(void)
+ {
+ if (__disable_trace_on_warning)
+ tracing_off();
+ }
+
+ /**
+ * tracer_tracing_is_on - show the real state of the ring buffer
+ * @tr: the trace array whose ring buffer state is queried
+ *
+ * Shows the real state of @tr's ring buffer: whether it is
+ * currently recording or not.
+ */
+ static int tracer_tracing_is_on(struct trace_array *tr)
+ {
+ if (tr->trace_buffer.buffer)
+ return ring_buffer_record_is_on(tr->trace_buffer.buffer);
+ return !tr->buffer_disabled;
+ }
+
/**
* tracing_is_on - show state of ring buffers enabled
*/
int tracing_is_on(void)
{
- if (global_trace.trace_buffer.buffer)
- return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
- return !global_trace.buffer_disabled;
+ return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
ARCH_TRACE_CLOCKS
};
-int trace_clock_id;
-
/*
* trace_parser_get_init - gets the buffer for trace parser
*/
__buffer_unlock_commit(buffer, event);
}
- void
- ftrace(struct trace_array *tr, struct trace_array_cpu *data,
- unsigned long ip, unsigned long parent_ip, unsigned long flags,
- int pc)
- {
- if (likely(!atomic_read(&data->disabled)))
- trace_function(tr, ip, parent_ip, flags, pc);
- }
-
#ifdef CONFIG_STACKTRACE
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
};
static struct trace_iterator *
- __tracing_open(struct inode *inode, struct file *file, bool snapshot)
+ __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
+ struct inode *inode, struct file *file, bool snapshot)
{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
struct trace_iterator *iter;
int cpu;
iter->iter_flags |= TRACE_FILE_ANNOTATE;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
- if (trace_clocks[trace_clock_id].in_ns)
+ if (trace_clocks[tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
/* stop the trace while dumping if we are not opening "snapshot" */
tracing_iter_reset(iter, cpu);
}
- tr->ref++;
-
mutex_unlock(&trace_types_lock);
return iter;
return 0;
}
+ /*
+ * Open and update the trace_array ref count.
+ * The inode's i_private must point to the trace_array being opened.
+ */
+ static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
+ {
+ struct trace_array *tr = inode->i_private;
+
+ if (tracing_disabled)
+ return -ENODEV;
+
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
+ filp->private_data = inode->i_private;
+
+ return 0;
+ }
+
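+ /*
+ * Like tracing_open_generic_tr(), but here i_private points to a
+ * trace_cpu; take the reference on its parent trace_array.
+ */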
+ static int tracing_open_generic_tc(struct inode *inode, struct file *filp)
+ {
+ struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
+
+ if (tracing_disabled)
+ return -ENODEV;
+
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
+ filp->private_data = inode->i_private;
+
+ return 0;
+ }
+
static int tracing_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
struct trace_array *tr;
int cpu;
- if (!(file->f_mode & FMODE_READ))
+ /* Writes do not use seq_file, need to grab tr from inode */
+ if (!(file->f_mode & FMODE_READ)) {
+ struct trace_cpu *tc = inode->i_private;
+
+ trace_array_put(tc->tr);
return 0;
+ }
iter = m->private;
tr = iter->tr;
+ trace_array_put(tr);
mutex_lock(&trace_types_lock);
- WARN_ON(!tr->ref);
- tr->ref--;
-
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
kfree(iter->trace);
kfree(iter->buffer_iter);
seq_release_private(inode, file);
+
+ return 0;
+ }
+
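+ /*
+ * Release counterparts of the tracing_open_generic_*() helpers:
+ * drop the trace_array reference taken at open time.
+ */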
+ static int tracing_release_generic_tr(struct inode *inode, struct file *file)
+ {
+ struct trace_array *tr = inode->i_private;
+
+ trace_array_put(tr);
return 0;
}
+ static int tracing_release_generic_tc(struct inode *inode, struct file *file)
+ {
+ struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
+
+ trace_array_put(tr);
+ return 0;
+ }
+
+ static int tracing_single_release_tr(struct inode *inode, struct file *file)
+ {
+ struct trace_array *tr = inode->i_private;
+
+ trace_array_put(tr);
+
+ return single_release(inode, file);
+ }
+
static int tracing_open(struct inode *inode, struct file *file)
{
+ struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
struct trace_iterator *iter;
int ret = 0;
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
/* If this file was open for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC)) {
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
-
if (tc->cpu == RING_BUFFER_ALL_CPUS)
tracing_reset_online_cpus(&tr->trace_buffer);
else
}
if (file->f_mode & FMODE_READ) {
- iter = __tracing_open(inode, file, false);
+ iter = __tracing_open(tr, tc, inode, file, false);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
else if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
}
+
+ if (ret < 0)
+ trace_array_put(tr);
+
return ret;
}
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
+ struct trace_array *tr = inode->i_private;
+ int ret;
+
if (tracing_disabled)
return -ENODEV;
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
- return single_open(file, tracing_trace_options_show, inode->i_private);
+ ret = single_open(file, tracing_trace_options_show, inode->i_private);
+ if (ret < 0)
+ trace_array_put(tr);
+
+ return ret;
}
.open = tracing_trace_options_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = tracing_single_release_tr,
.write = tracing_trace_options_write,
};
if (tracing_disabled)
return -ENODEV;
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
mutex_lock(&trace_types_lock);
/* create a buffer to store the information to pass to userspace */
iter->iter_flags |= TRACE_FILE_LAT_FMT;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
- if (trace_clocks[trace_clock_id].in_ns)
+ if (trace_clocks[tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
iter->cpu_file = tc->cpu;
fail:
kfree(iter->trace);
kfree(iter);
+ __trace_array_put(tr);
mutex_unlock(&trace_types_lock);
return ret;
}
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
struct trace_iterator *iter = file->private_data;
+ struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
mutex_lock(&trace_types_lock);
kfree(iter->trace);
kfree(iter);
+ trace_array_put(tr);
+
return 0;
}
*
* iter->pos will be 0 if we haven't read anything.
*/
- if (!tracing_is_enabled() && iter->pos)
+ if (!tracing_is_on() && iter->pos)
break;
}
/* resize the ring buffer to 0 */
tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
+ trace_array_put(tr);
+
return 0;
}
size_t cnt, loff_t *fpos)
{
unsigned long addr = (unsigned long)ubuf;
+ struct trace_array *tr = filp->private_data;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct print_entry *entry;
local_save_flags(irq_flags);
size = sizeof(*entry) + cnt + 2; /* possible \n added */
- buffer = global_trace.trace_buffer.buffer;
+ buffer = tr->trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
irq_flags, preempt_count());
if (!event) {
static int tracing_clock_open(struct inode *inode, struct file *file)
{
+ struct trace_array *tr = inode->i_private;
+ int ret;
+
if (tracing_disabled)
return -ENODEV;
- return single_open(file, tracing_clock_show, inode->i_private);
+ if (trace_array_get(tr))
+ return -ENODEV;
+
+ ret = single_open(file, tracing_clock_show, inode->i_private);
+ if (ret < 0)
+ trace_array_put(tr);
+
+ return ret;
}
struct ftrace_buffer_info {
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
struct trace_iterator *iter;
struct seq_file *m;
int ret = 0;
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
if (file->f_mode & FMODE_READ) {
- iter = __tracing_open(inode, file, true);
+ iter = __tracing_open(tr, tc, inode, file, true);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
} else {
kfree(m);
return -ENOMEM;
}
- iter->tr = tc->tr;
+ iter->tr = tr;
iter->trace_buffer = &tc->tr->max_buffer;
iter->cpu_file = tc->cpu;
m->private = iter;
file->private_data = m;
}
+ if (ret < 0)
+ trace_array_put(tr);
+
return ret;
}
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
+ int ret;
+
+ ret = tracing_release(inode, file);
if (file->f_mode & FMODE_READ)
- return tracing_release(inode, file);
+ return ret;
/* If write only, the seq_file is just a stub */
if (m)
};
static const struct file_operations tracing_entries_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tc,
.read = tracing_entries_read,
.write = tracing_entries_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tc,
};
static const struct file_operations tracing_total_entries_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_total_entries_read,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations tracing_free_buffer_fops = {
+ .open = tracing_open_generic_tr,
.write = tracing_free_buffer_write,
.release = tracing_free_buffer_release,
};
static const struct file_operations tracing_mark_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.write = tracing_mark_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations trace_clock_fops = {
.open = tracing_clock_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = tracing_single_release_tr,
.write = tracing_clock_write,
};
struct trace_cpu *tc = inode->i_private;
struct trace_array *tr = tc->tr;
struct ftrace_buffer_info *info;
+ int ret;
if (tracing_disabled)
return -ENODEV;
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ if (!info) {
+ trace_array_put(tr);
return -ENOMEM;
+ }
mutex_lock(&trace_types_lock);
mutex_unlock(&trace_types_lock);
- return nonseekable_open(inode, filp);
+ ret = nonseekable_open(inode, filp);
+ if (ret < 0)
+ trace_array_put(tr);
+
+ return ret;
}
static unsigned int
mutex_lock(&trace_types_lock);
- WARN_ON(!iter->tr->ref);
- iter->tr->ref--;
+ __trace_array_put(iter->tr);
if (info->spare)
ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
trace_seq_printf(s, "bytes: %ld\n", cnt);
- if (trace_clocks[trace_clock_id].in_ns) {
+ if (trace_clocks[tr->clock_id].in_ns) {
/* local or global for trace_clock */
t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
char buf[64];
int r;
- if (buffer)
- r = ring_buffer_record_is_on(buffer);
- else
- r = 0;
-
+ r = tracer_tracing_is_on(tr);
r = sprintf(buf, "%d\n", r);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
if (buffer) {
mutex_lock(&trace_types_lock);
if (val) {
- ring_buffer_record_on(buffer);
+ tracer_tracing_on(tr);
if (tr->current_trace->start)
tr->current_trace->start(tr);
} else {
- ring_buffer_record_off(buffer);
+ tracer_tracing_off(tr);
if (tr->current_trace->stop)
tr->current_trace->stop(tr);
}
}
static const struct file_operations rb_simple_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = rb_simple_read,
.write = rb_simple_write,
+ .release = tracing_release_generic_tr,
.llseek = default_llseek,
};
trace_create_file("buffer_total_size_kb", 0444, d_tracer,
tr, &tracing_total_entries_fops);
- trace_create_file("free_buffer", 0644, d_tracer,
+ trace_create_file("free_buffer", 0200, d_tracer,
tr, &tracing_free_buffer_fops);
trace_create_file("trace_marker", 0220, d_tracer,