···154154155155static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;156156157157+/* Used for individual buffers (after the counter) */158158+#define RB_BUFFER_OFF (1 << 20)159159+157160#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)158158-159159-/**160160- * tracing_on - enable all tracing buffers161161- *162162- * This function enables all tracing buffers that may have been163163- * disabled with tracing_off.164164- */165165-void tracing_on(void)166166-{167167- set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);168168-}169169-EXPORT_SYMBOL_GPL(tracing_on);170170-171171-/**172172- * tracing_off - turn off all tracing buffers173173- *174174- * This function stops all tracing buffers from recording data.175175- * It does not disable any overhead the tracers themselves may176176- * be causing. This function simply causes all recording to177177- * the ring buffers to fail.178178- */179179-void tracing_off(void)180180-{181181- clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);182182-}183183-EXPORT_SYMBOL_GPL(tracing_off);184161185162/**186163 * tracing_off_permanent - permanently disable ring buffers···169192{170193 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);171194}172172-173173-/**174174- * tracing_is_on - show state of ring buffers enabled175175- */176176-int tracing_is_on(void)177177-{178178- return ring_buffer_flags == RB_BUFFERS_ON;179179-}180180-EXPORT_SYMBOL_GPL(tracing_is_on);181195182196#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))183197#define RB_ALIGNMENT 4U···25872619EXPORT_SYMBOL_GPL(ring_buffer_record_enable);2588262025892621/**26222622+ * ring_buffer_record_off - stop all writes into the buffer26232623+ * @buffer: The ring buffer to stop writes to.26242624+ *26252625+ * This prevents all writes to the buffer. 
Any attempt to write26262626+ * to the buffer after this will fail and return NULL.26272627+ *26282628+ * This is different than ring_buffer_record_disable() as26292629+ * it works like an on/off switch, whereas the disable() version26302630+ * must be paired with an enable().26312631+ */26322632+void ring_buffer_record_off(struct ring_buffer *buffer)26332633+{26342634+ unsigned int rd;26352635+ unsigned int new_rd;26362636+26372637+ do {26382638+ rd = atomic_read(&buffer->record_disabled);26392639+ new_rd = rd | RB_BUFFER_OFF;26402640+ } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);26412641+}26422642+EXPORT_SYMBOL_GPL(ring_buffer_record_off);26432643+26442644+/**26452645+ * ring_buffer_record_on - restart writes into the buffer26462646+ * @buffer: The ring buffer to start writes to.26472647+ *26482648+ * This enables all writes to the buffer that was disabled by26492649+ * ring_buffer_record_off().26502650+ *26512651+ * This is different than ring_buffer_record_enable() as26522652+ * it works like an on/off switch, whereas the enable() version26532653+ * must be paired with a disable().26542654+ */26552655+void ring_buffer_record_on(struct ring_buffer *buffer)26562656+{26572657+ unsigned int rd;26582658+ unsigned int new_rd;26592659+26602660+ do {26612661+ rd = atomic_read(&buffer->record_disabled);26622662+ new_rd = rd & ~RB_BUFFER_OFF;26632663+ } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);26642664+}26652665+EXPORT_SYMBOL_GPL(ring_buffer_record_on);26662666+26672667+/**26682668+ * ring_buffer_record_is_on - return true if the ring buffer can write26692669+ * @buffer: The ring buffer to see if write is enabled26702670+ *26712671+ * Returns true if the ring buffer is in a state that it accepts writes.26722672+ */26732673+int ring_buffer_record_is_on(struct ring_buffer *buffer)26742674+{26752675+ return !atomic_read(&buffer->record_disabled);26762676+}26772677+26782678+/**25902679 * ring_buffer_record_disable_cpu - stop 
all writes into the cpu_buffer25912680 * @buffer: The ring buffer to stop writes to.25922681 * @cpu: The CPU buffer to stop···40634038 return ret;40644039}40654040EXPORT_SYMBOL_GPL(ring_buffer_read_page);40664066-40674067-#ifdef CONFIG_TRACING40684068-static ssize_t40694069-rb_simple_read(struct file *filp, char __user *ubuf,40704070- size_t cnt, loff_t *ppos)40714071-{40724072- unsigned long *p = filp->private_data;40734073- char buf[64];40744074- int r;40754075-40764076- if (test_bit(RB_BUFFERS_DISABLED_BIT, p))40774077- r = sprintf(buf, "permanently disabled\n");40784078- else40794079- r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));40804080-40814081- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);40824082-}40834083-40844084-static ssize_t40854085-rb_simple_write(struct file *filp, const char __user *ubuf,40864086- size_t cnt, loff_t *ppos)40874087-{40884088- unsigned long *p = filp->private_data;40894089- unsigned long val;40904090- int ret;40914091-40924092- ret = kstrtoul_from_user(ubuf, cnt, 10, &val);40934093- if (ret)40944094- return ret;40954095-40964096- if (val)40974097- set_bit(RB_BUFFERS_ON_BIT, p);40984098- else40994099- clear_bit(RB_BUFFERS_ON_BIT, p);41004100-41014101- (*ppos)++;41024102-41034103- return cnt;41044104-}41054105-41064106-static const struct file_operations rb_simple_fops = {41074107- .open = tracing_open_generic,41084108- .read = rb_simple_read,41094109- .write = rb_simple_write,41104110- .llseek = default_llseek,41114111-};41124112-41134113-41144114-static __init int rb_init_debugfs(void)41154115-{41164116- struct dentry *d_tracer;41174117-41184118- d_tracer = tracing_init_dentry();41194119-41204120- trace_create_file("tracing_on", 0644, d_tracer,41214121- &ring_buffer_flags, &rb_simple_fops);41224122-41234123- return 0;41244124-}41254125-41264126-fs_initcall(rb_init_debugfs);41274127-#endif4128404141294042#ifdef CONFIG_HOTPLUG_CPU41304043static int rb_cpu_notify(struct notifier_block *self,
+109
kernel/trace/trace.c
···3636#include <linux/ctype.h>3737#include <linux/init.h>3838#include <linux/poll.h>3939+#include <linux/nmi.h>3940#include <linux/fs.h>40414142#include "trace.h"···351350}352351353352static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);353353+354354+/**355355+ * tracing_on - enable tracing buffers356356+ *357357+ * This function enables tracing buffers that may have been358358+ * disabled with tracing_off.359359+ */360360+void tracing_on(void)361361+{362362+ if (global_trace.buffer)363363+ ring_buffer_record_on(global_trace.buffer);364364+ /*365365+ * This flag is only looked at when buffers haven't been366366+ * allocated yet. We don't really care about the race367367+ * between setting this flag and actually turning368368+ * on the buffer.369369+ */370370+ global_trace.buffer_disabled = 0;371371+}372372+EXPORT_SYMBOL_GPL(tracing_on);373373+374374+/**375375+ * tracing_off - turn off tracing buffers376376+ *377377+ * This function stops the tracing buffers from recording data.378378+ * It does not disable any overhead the tracers themselves may379379+ * be causing. This function simply causes all recording to380380+ * the ring buffers to fail.381381+ */382382+void tracing_off(void)383383+{384384+ if (global_trace.buffer)385385+ ring_buffer_record_off(global_trace.buffer);386386+ /*387387+ * This flag is only looked at when buffers haven't been388388+ * allocated yet. 
We don't really care about the race389389+ * between setting this flag and actually turning390390+ * on the buffer.391391+ */392392+ global_trace.buffer_disabled = 1;393393+}394394+EXPORT_SYMBOL_GPL(tracing_off);395395+396396+/**397397+ * tracing_is_on - show state of ring buffers enabled398398+ */399399+int tracing_is_on(void)400400+{401401+ if (global_trace.buffer)402402+ return ring_buffer_record_is_on(global_trace.buffer);403403+ return !global_trace.buffer_disabled;404404+}405405+EXPORT_SYMBOL_GPL(tracing_is_on);354406355407/**356408 * trace_wake_up - wake up tasks waiting for trace input···46214567 create_trace_option_core_file(trace_options[i], i);46224568}4623456945704570+static ssize_t45714571+rb_simple_read(struct file *filp, char __user *ubuf,45724572+ size_t cnt, loff_t *ppos)45734573+{45744574+ struct ring_buffer *buffer = filp->private_data;45754575+ char buf[64];45764576+ int r;45774577+45784578+ if (buffer)45794579+ r = ring_buffer_record_is_on(buffer);45804580+ else45814581+ r = 0;45824582+45834583+ r = sprintf(buf, "%d\n", r);45844584+45854585+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);45864586+}45874587+45884588+static ssize_t45894589+rb_simple_write(struct file *filp, const char __user *ubuf,45904590+ size_t cnt, loff_t *ppos)45914591+{45924592+ struct ring_buffer *buffer = filp->private_data;45934593+ unsigned long val;45944594+ int ret;45954595+45964596+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);45974597+ if (ret)45984598+ return ret;45994599+46004600+ if (buffer) {46014601+ if (val)46024602+ ring_buffer_record_on(buffer);46034603+ else46044604+ ring_buffer_record_off(buffer);46054605+ }46064606+46074607+ (*ppos)++;46084608+46094609+ return cnt;46104610+}46114611+46124612+static const struct file_operations rb_simple_fops = {46134613+ .open = tracing_open_generic,46144614+ .read = rb_simple_read,46154615+ .write = rb_simple_write,46164616+ .llseek = default_llseek,46174617+};46184618+46244619static __init int 
tracer_init_debugfs(void)46254620{46264621 struct dentry *d_tracer;···4728462547294626 trace_create_file("trace_clock", 0644, d_tracer, NULL,47304627 &trace_clock_fops);46284628+46294629+ trace_create_file("tracing_on", 0644, d_tracer,46304630+ global_trace.buffer, &rb_simple_fops);4731463147324632#ifdef CONFIG_DYNAMIC_FTRACE47334633 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,···49044798 if (ret != TRACE_TYPE_NO_CONSUME)49054799 trace_consume(&iter);49064800 }48014801+ touch_nmi_watchdog();4907480249084803 trace_printk_seq(&iter.seq);49094804 }···49704863 goto out_free_cpumask;49714864 }49724865 global_trace.entries = ring_buffer_size(global_trace.buffer);48664866+ if (global_trace.buffer_disabled)48674867+ tracing_off();497348684974486949754870#ifdef CONFIG_TRACER_MAX_TRACE