ring-buffer: Remove the unused tracing_off_permanent()
The tracing_off_permanent() call is a way to disable all ring buffers. Nothing uses it, and nothing should use it, as tracing_off() and friends are better: they disable only the ring buffers related to tracing. tracing_off_permanent() even disabled non-tracing ring buffers. This is a bit drastic; it was added to handle NMIs producing output that could corrupt the ring buffer, back when only tracing used ring buffers. It is now obsolete, adds a little overhead, and should be removed. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
289a5a25c5
commit
3c6296f716
2 changed files with 0 additions and 67 deletions
|
@ -532,12 +532,6 @@ bool mac_pton(const char *s, u8 *mac);
|
||||||
*
|
*
|
||||||
* Most likely, you want to use tracing_on/tracing_off.
|
* Most likely, you want to use tracing_on/tracing_off.
|
||||||
*/
|
*/
|
||||||
#ifdef CONFIG_RING_BUFFER
|
|
||||||
/* tracing_off_permanent stops recording with no way to bring it back */
|
|
||||||
void tracing_off_permanent(void);
|
|
||||||
#else
|
|
||||||
static inline void tracing_off_permanent(void) { }
|
|
||||||
#endif
|
|
||||||
|
|
||||||
enum ftrace_dump_mode {
|
enum ftrace_dump_mode {
|
||||||
DUMP_NONE,
|
DUMP_NONE,
|
||||||
|
|
|
@ -115,63 +115,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*
|
|
||||||
* A fast way to enable or disable all ring buffers is to
|
|
||||||
* call tracing_on or tracing_off. Turning off the ring buffers
|
|
||||||
* prevents all ring buffers from being recorded to.
|
|
||||||
* Turning this switch on, makes it OK to write to the
|
|
||||||
* ring buffer, if the ring buffer is enabled itself.
|
|
||||||
*
|
|
||||||
* There's three layers that must be on in order to write
|
|
||||||
* to the ring buffer.
|
|
||||||
*
|
|
||||||
* 1) This global flag must be set.
|
|
||||||
* 2) The ring buffer must be enabled for recording.
|
|
||||||
* 3) The per cpu buffer must be enabled for recording.
|
|
||||||
*
|
|
||||||
* In case of an anomaly, this global flag has a bit set that
|
|
||||||
* will permanently disable all ring buffers.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Global flag to disable all recording to ring buffers
|
|
||||||
* This has two bits: ON, DISABLED
|
|
||||||
*
|
|
||||||
* ON DISABLED
|
|
||||||
* ---- ----------
|
|
||||||
* 0 0 : ring buffers are off
|
|
||||||
* 1 0 : ring buffers are on
|
|
||||||
* X 1 : ring buffers are permanently disabled
|
|
||||||
*/
|
|
||||||
|
|
||||||
enum {
|
|
||||||
RB_BUFFERS_ON_BIT = 0,
|
|
||||||
RB_BUFFERS_DISABLED_BIT = 1,
|
|
||||||
};
|
|
||||||
|
|
||||||
enum {
|
|
||||||
RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
|
|
||||||
RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
|
|
||||||
};
|
|
||||||
|
|
||||||
static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
|
|
||||||
|
|
||||||
/* Used for individual buffers (after the counter) */
|
/* Used for individual buffers (after the counter) */
|
||||||
#define RB_BUFFER_OFF (1 << 20)
|
#define RB_BUFFER_OFF (1 << 20)
|
||||||
|
|
||||||
#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
|
#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
|
||||||
|
|
||||||
/**
|
|
||||||
* tracing_off_permanent - permanently disable ring buffers
|
|
||||||
*
|
|
||||||
* This function, once called, will disable all ring buffers
|
|
||||||
* permanently.
|
|
||||||
*/
|
|
||||||
void tracing_off_permanent(void)
|
|
||||||
{
|
|
||||||
set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
|
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
|
||||||
#define RB_ALIGNMENT 4U
|
#define RB_ALIGNMENT 4U
|
||||||
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
|
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
|
||||||
|
@ -2728,9 +2676,6 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
|
||||||
struct ring_buffer_event *event;
|
struct ring_buffer_event *event;
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
if (ring_buffer_flags != RB_BUFFERS_ON)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
/* If we are tracing schedule, we don't want to recurse */
|
/* If we are tracing schedule, we don't want to recurse */
|
||||||
preempt_disable_notrace();
|
preempt_disable_notrace();
|
||||||
|
|
||||||
|
@ -2992,9 +2937,6 @@ int ring_buffer_write(struct ring_buffer *buffer,
|
||||||
int ret = -EBUSY;
|
int ret = -EBUSY;
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
if (ring_buffer_flags != RB_BUFFERS_ON)
|
|
||||||
return -EBUSY;
|
|
||||||
|
|
||||||
preempt_disable_notrace();
|
preempt_disable_notrace();
|
||||||
|
|
||||||
if (atomic_read(&buffer->record_disabled))
|
if (atomic_read(&buffer->record_disabled))
|
||||||
|
@ -4350,9 +4292,6 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
|
||||||
|
|
||||||
ret = -EAGAIN;
|
ret = -EAGAIN;
|
||||||
|
|
||||||
if (ring_buffer_flags != RB_BUFFERS_ON)
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
if (atomic_read(&buffer_a->record_disabled))
|
if (atomic_read(&buffer_a->record_disabled))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue