#include <linux/delay.h>
#include <linux/module.h>
#include <linux/percpu.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/percpu-defs.h>
+#include <linux/timer.h>
#include <asm/cacheflush.h>
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/iterator.h>
#include <wrapper/ringbuffer/nohz.h>
#include <wrapper/atomic.h>
-#include <wrapper/kref.h>
-#include <wrapper/percpu-defs.h>
-#include <wrapper/timer.h>
-#include <wrapper/vmalloc.h>
/*
* Internal structure representing offsets to use at a sub-buffer switch.
struct channel *chan = buf->backend.chan;
lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
- lttng_kvfree(buf->commit_hot);
- lttng_kvfree(buf->commit_cold);
- lttng_kvfree(buf->ts_end);
+ kvfree(buf->commit_hot);
+ kvfree(buf->commit_cold);
+ kvfree(buf->ts_end);
lib_ring_buffer_backend_free(&buf->backend);
}
return ret;
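+	/*
+	 * kvzalloc_node() attempts a kmalloc-based allocation and falls back
+	 * to vmalloc when that fails, as the removed lttng wrapper did.
+	 */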
buf->commit_hot =
- lttng_kvzalloc_node(ALIGN(sizeof(*buf->commit_hot)
+ kvzalloc_node(ALIGN(sizeof(*buf->commit_hot)
* chan->backend.num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
}
buf->commit_cold =
- lttng_kvzalloc_node(ALIGN(sizeof(*buf->commit_cold)
+ kvzalloc_node(ALIGN(sizeof(*buf->commit_cold)
* chan->backend.num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
}
buf->ts_end =
- lttng_kvzalloc_node(ALIGN(sizeof(*buf->ts_end)
+ kvzalloc_node(ALIGN(sizeof(*buf->ts_end)
* chan->backend.num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
chan->backend.cpumask));
cpumask_set_cpu(cpu, chan->backend.cpumask);
}
-
return 0;
/* Error handling */
free_init:
- lttng_kvfree(buf->ts_end);
+ kvfree(buf->ts_end);
free_commit_cold:
- lttng_kvfree(buf->commit_cold);
+ kvfree(buf->commit_cold);
free_commit:
- lttng_kvfree(buf->commit_hot);
+ kvfree(buf->commit_hot);
free_chanbuf:
lib_ring_buffer_backend_free(&buf->backend);
return ret;
}
-static void switch_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
+static void switch_buffer_timer(struct timer_list *t)
{
- struct lib_ring_buffer *buf = lttng_from_timer(buf, t, switch_timer);
+ struct lib_ring_buffer *buf = from_timer(buf, t, switch_timer);
struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = &chan->backend.config;
/*
* Only flush buffers periodically if readers are active.
if (atomic_long_read(&buf->active_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- lttng_mod_timer_pinned(&buf->switch_timer,
- jiffies + chan->switch_timer_interval);
- else
- mod_timer(&buf->switch_timer,
- jiffies + chan->switch_timer_interval);
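+	/*
+	 * Per-cpu buffer timers are set up with TIMER_PINNED, so a plain
+	 * mod_timer() re-arm from the callback keeps them on the same CPU.
+	 */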
+ mod_timer(&buf->switch_timer,
+ jiffies + chan->switch_timer_interval);
}
/*
return;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- flags = LTTNG_TIMER_PINNED;
+ flags = TIMER_PINNED;
- lttng_timer_setup(&buf->switch_timer, switch_buffer_timer, flags, buf);
+ timer_setup(&buf->switch_timer, switch_buffer_timer, flags);
buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
/*
* Polling timer to check the channels for data.
*/
-static void read_buffer_timer(LTTNG_TIMER_FUNC_ARG_TYPE t)
+static void read_buffer_timer(struct timer_list *t)
{
- struct lib_ring_buffer *buf = lttng_from_timer(buf, t, read_timer);
+ struct lib_ring_buffer *buf = from_timer(buf, t, read_timer);
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
wake_up_interruptible(&chan->read_wait);
}
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- lttng_mod_timer_pinned(&buf->read_timer,
- jiffies + chan->read_timer_interval);
- else
- mod_timer(&buf->read_timer,
- jiffies + chan->read_timer_interval);
+ mod_timer(&buf->read_timer,
+ jiffies + chan->read_timer_interval);
}
/*
return;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- flags = LTTNG_TIMER_PINNED;
+ flags = TIMER_PINNED;
- lttng_timer_setup(&buf->read_timer, read_buffer_timer, flags, buf);
+ timer_setup(&buf->read_timer, read_buffer_timer, flags);
buf->read_timer.expires = jiffies + chan->read_timer_interval;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
raw_spin_unlock(&buf->raw_tick_nohz_spinlock);
break;
case TICK_NOHZ_STOP:
- spin_lock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+ spin_lock(this_cpu_ptr(&ring_buffer_nohz_lock));
lib_ring_buffer_stop_switch_timer(buf);
lib_ring_buffer_stop_read_timer(buf);
- spin_unlock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+ spin_unlock(this_cpu_ptr(&ring_buffer_nohz_lock));
break;
case TICK_NOHZ_RESTART:
- spin_lock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+ spin_lock(this_cpu_ptr(&ring_buffer_nohz_lock));
lib_ring_buffer_start_read_timer(buf);
lib_ring_buffer_start_switch_timer(buf);
- spin_unlock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+ spin_unlock(this_cpu_ptr(&ring_buffer_nohz_lock));
break;
}
if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
return -EBUSY;
- if (!lttng_kref_get(&chan->ref)) {
- atomic_long_dec(&buf->active_readers);
- return -EOVERFLOW;
- }
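+	/*
+	 * The refcount_t-backed kref saturates and warns instead of
+	 * overflowing, so the explicit overflow check is not needed.
+	 */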
+ kref_get(&chan->ref);
lttng_smp_mb__after_atomic();
return 0;
}