// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <asm/local.h>
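
/*
 * Matches the layout of a ring buffer sub-buffer: a timestamp, a commit
 * counter giving the bytes of event data on the page, then the raw event
 * data itself.  read_page() casts a copied-out page to this so it can
 * walk the events by hand.
 */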
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10ULL
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);

static struct trace_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

static int producer_fifo;
static int consumer_fifo;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "use fifo for producer: 0 - disabled, 1 - low prio, 2 - fifo");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "use fifo for consumer: 0 - disabled, 1 - low prio, 2 - fifo");

static int read_events;

static int test_error;
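
/*
 * Latch the first failure: set test_error once and WARN, letting
 * break_test() below stop both the producer and consumer loops.
 */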
#define TEST_ERROR()				\
	do {					\
		if (!test_error) {		\
			test_error = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

static bool break_test(void)
{
	return test_error || kthread_should_stop();
}

static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

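	/*
	 * Consume the next event from this CPU's buffer.  The last
	 * argument could return the count of events lost to overwrite;
	 * the benchmark does not track those, so it passes NULL.
	 */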
	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		TEST_ERROR();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

static enum event_status read_page(int cpu)
{
	struct buffer_data_read_page *bpage;
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	int page_size;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(bpage))
		return EVENT_DROPPED;

	page_size = ring_buffer_subbuf_size_get(buffer);
	ret = ring_buffer_read_page(buffer, bpage, page_size, cpu, 1);
	if (ret >= 0) {
		rpage = ring_buffer_read_page_data(bpage);
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
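		/*
		 * Walk the raw events up to the commit offset.  The event
		 * header's type_len says how far to advance: padding and
		 * type_len == 0 events carry their payload length in
		 * array[0] (plus the 4-byte header), a time extend is a
		 * fixed 8 bytes, and any other value means a data event
		 * of (type_len + 1) * 4 bytes total.
		 */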
		for (i = 0; i < commit && !test_error; i += inc) {

			if (i >= (page_size - offsetof(struct rb_page, data))) {
				TEST_ERROR();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					TEST_ERROR();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				if (!event->array[0]) {
					TEST_ERROR();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (test_error)
				break;

			if (inc <= 0) {
				TEST_ERROR();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, cpu, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}
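
/*
 * Reader work loop: alternate between consuming events and reading
 * whole pages on successive passes, draining every online CPU until
 * the producer sets reader_finish.
 */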
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	/*
	 * Continue running until the producer specifically asks to stop
	 * and is ready for the completion.
	 */
	while (!READ_ONCE(reader_finish)) {
		int found = 1;

		while (found && !test_error) {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (test_error)
					break;

				if (stat == EVENT_FOUND)
					found = 1;
			}
		}

		/* Wait till the producer wakes us up when there is more data
		 * available or when the producer wants us to finish reading.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	reader_finish = 0;
	complete(&read_done);
}

static void ring_buffer_producer(void)
{
	ktime_t start_time, end_time, timeout;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	start_time = ktime_get();
	timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;
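
		/*
		 * Batch write_iteration reserve/commit pairs between
		 * timestamp reads.  Each event is a 10-byte payload whose
		 * first int is the writing CPU, which the readers verify.
		 */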
		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer);
			}
		}
		end_time = ktime_get();

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPTION
		/*
		 * If we are on a non-preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif
	} while (ktime_before(end_time, timeout) && !break_test());
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}
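
	/*
	 * Report the run: "hit" counts successful reserves, "missed"
	 * counts failed ones, and entries + overruns + read should
	 * account for every event written.
	 */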
	time = ktime_us_delta(end_time, start_time);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (test_error)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo)
			trace_printk("Running Consumer at SCHED_FIFO %s\n",
				     str_low_high(consumer_fifo == 1));
		else
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
	}
	if (producer_fifo)
		trace_printk("Running Producer at SCHED_FIFO %s\n",
			     str_low_high(producer_fifo == 1));
	else
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);

	/* Let the user know that the test is running at low priority */
	if (!producer_fifo && !consumer_fifo &&
	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	trace_printk("Time: %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled)\n");
	else
		trace_printk("Read: %ld (by %s)\n", read,
			     read_events ? "events" : "pages");
	trace_printk("Entries: %lld\n", entries);
	trace_printk("Total: %lld\n", entries + overruns + read);
	trace_printk("Missed: %ld\n", missed);
	trace_printk("Hit: %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}
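
/*
 * Park after a test ends (or errors out) until the module is
 * unloaded and kthread_stop() is called.
 */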
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

static int ring_buffer_consumer_thread(void *arg)
{
	while (!break_test()) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}
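
/*
 * One pass per loop: reset the buffer, let the consumer (if any)
 * signal read_start, hammer the buffer for RUN_TIME seconds, then
 * sleep SLEEP_TIME seconds before going again.
 */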
static int ring_buffer_producer_thread(void *arg)
{
	while (!break_test()) {
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();
		if (break_test())
			goto out_kill;

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			goto out_kill;
		schedule_timeout(HZ * SLEEP_TIME);
	}

out_kill:
	__set_current_state(TASK_RUNNING);
	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;
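
	/*
	 * kthread_create() leaves the consumer asleep; the producer
	 * wakes it at the start of each pass, while the producer itself
	 * starts immediately via kthread_run().
	 */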
	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 2)
			sched_set_fifo(consumer);
		else if (consumer_fifo == 1)
			sched_set_fifo_low(consumer);
		else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 2)
		sched_set_fifo(producer);
	else if (producer_fifo == 1)
		sched_set_fifo_low(producer);
	else
		set_user_nice(producer, producer_nice);

	return 0;

out_kill:
	if (consumer)
		kthread_stop(consumer);

out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");