Merge branch 'master' of ssh://git.lttng.org/home/git/lttng-modules
authorMathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sat, 11 Jun 2011 17:19:13 +0000 (13:19 -0400)
committerMathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sat, 11 Jun 2011 17:19:13 +0000 (13:19 -0400)
17 files changed:
Makefile
lib/Makefile
lib/ringbuffer/frontend.h
lib/ringbuffer/frontend_types.h
lib/ringbuffer/ring_buffer_frontend.c
lib/ringbuffer/ring_buffer_vfs.c
ltt-context.c
ltt-debugfs-abi.c
ltt-events.c
ltt-events.h
ltt-ring-buffer-client.h
ltt-ring-buffer-metadata-client.h
lttng-context-perf-counters.c
wrapper/poll.c [new file with mode: 0644]
wrapper/poll.h
wrapper/splice.c [new file with mode: 0644]
wrapper/splice.h

index dc44fd40b6cfb2834ef0b1e89861aa5e85ecafc4..d6f1a886c220f03c256bbd35a419b783902cdd8e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -13,7 +13,8 @@ obj-m += ltt-relay.o
 ltt-relay-objs :=  ltt-events.o ltt-debugfs-abi.o \
                        ltt-probes.o ltt-core.o ltt-context.o \
                        lttng-context-pid.o lttng-context-comm.o \
-                       lttng-context-prio.o lttng-context-nice.o
+                       lttng-context-prio.o lttng-context-nice.o \
+                       wrapper/poll.o
 
 ifneq ($(CONFIG_PERF_EVENTS),)
 ltt-relay-objs += lttng-context-perf-counters.o
index 9fa49efc9b7037766e326bb87bc4497175c436dd..3fc82edc8b8a1ee1443a31cc6778719a19dd5f24 100644 (file)
@@ -7,4 +7,6 @@ lib-ring-buffer-objs := \
        ringbuffer/ring_buffer_vfs.o \
        ringbuffer/ring_buffer_splice.o \
        ringbuffer/ring_buffer_mmap.o \
-       prio_heap/lttng_prio_heap.o
+       prio_heap/lttng_prio_heap.o \
+       ../wrapper/poll.o \
+       ../wrapper/splice.o
index 003c2e1915f70cb7e53601efe9e8de0fb0d9b0e1..01af77a281b046a9dfc8d3de2d0eb69daa98c15e 100644 (file)
@@ -157,6 +157,18 @@ int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
        return finalized;
 }
 
+static inline
+int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
+{
+       return chan->finalized;
+}
+
+static inline
+int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
+{
+       return atomic_read(&chan->record_disabled);
+}
+
 static inline
 unsigned long lib_ring_buffer_get_read_data_size(
                                const struct lib_ring_buffer_config *config,
index 1a3187e64d5653bec97bbaf581606ca4186f13e2..fe208b676e0f035f8bd4b9794ca94f79f24ca8dd 100644 (file)
@@ -62,6 +62,8 @@ struct channel {
        int cpu_hp_enable:1;                    /* Enable CPU hotplug notif. */
        int hp_iter_enable:1;                   /* Enable hp iter notif. */
        wait_queue_head_t read_wait;            /* reader wait queue */
+       wait_queue_head_t hp_wait;              /* CPU hotplug wait queue */
+       int finalized;                          /* Has channel been finalized */
        struct channel_iter iter;               /* Channel read-side iterator */
        struct kref ref;                        /* Reference count */
 };
index 1931414b067a4c8836b1cece70be5c5ad45ad8a7..ecc72ab84f7e3143f930e77cac0bc6ad3de16a8e 100644 (file)
@@ -410,6 +410,7 @@ int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
        case CPU_DOWN_FAILED_FROZEN:
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
+               wake_up_interruptible(&chan->hp_wait);
                lib_ring_buffer_start_switch_timer(buf);
                lib_ring_buffer_start_read_timer(buf);
                return NOTIFY_OK;
@@ -626,6 +627,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
        chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
        kref_init(&chan->ref);
        init_waitqueue_head(&chan->read_wait);
+       init_waitqueue_head(&chan->hp_wait);
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
@@ -748,6 +750,8 @@ void *channel_destroy(struct channel *chan)
                ACCESS_ONCE(buf->finalized) = 1;
                wake_up_interruptible(&buf->read_wait);
        }
+       ACCESS_ONCE(chan->finalized) = 1;
+       wake_up_interruptible(&chan->hp_wait);
        wake_up_interruptible(&chan->read_wait);
        kref_put(&chan->ref, channel_release);
        priv = chan->backend.priv;
index 6a9fb469ca4f73ccc4940b755a3607ec4bf3784d..fecef217c8bfdd9a51587f930a5dc7c2dcecac64 100644 (file)
@@ -86,19 +86,24 @@ unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait)
        struct lib_ring_buffer *buf = filp->private_data;
        struct channel *chan = buf->backend.chan;
        const struct lib_ring_buffer_config *config = chan->backend.config;
-       int finalized;
+       int finalized, disabled;
 
        if (filp->f_mode & FMODE_READ) {
-               poll_wait_set_exclusive(wait);
+               init_poll_funcptr(wait, wrapper_pollwait_exclusive);
                poll_wait(filp, &buf->read_wait, wait);
 
                finalized = lib_ring_buffer_is_finalized(config, buf);
+               disabled = lib_ring_buffer_channel_is_disabled(chan);
+
                /*
                 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
                 * finalized load before offsets loads.
                 */
                WARN_ON(atomic_long_read(&buf->active_readers) != 1);
 retry:
+               if (disabled)
+                       return POLLERR;
+
                if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
                  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
                  == 0) {
@@ -159,6 +164,9 @@ long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long ar
        struct channel *chan = buf->backend.chan;
        const struct lib_ring_buffer_config *config = chan->backend.config;
 
+       if (lib_ring_buffer_channel_is_disabled(chan))
+               return -EIO;
+
        switch (cmd) {
        case RING_BUFFER_SNAPSHOT:
                return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
@@ -250,6 +258,9 @@ long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
        struct channel *chan = buf->backend.chan;
        const struct lib_ring_buffer_config *config = chan->backend.config;
 
+       if (lib_ring_buffer_channel_is_disabled(chan))
+               return -EIO;
+
        switch (cmd) {
        case RING_BUFFER_SNAPSHOT:
                return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
index 8f00bb1bd0067437170efa7aaecd23df8768212e..634694b968d4c06f6cf5066b684c80efc10d68d9 100644 (file)
@@ -45,6 +45,17 @@ struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
 }
 EXPORT_SYMBOL_GPL(lttng_append_context);
 
+void lttng_remove_context_field(struct lttng_ctx **ctx_p,
+                               struct lttng_ctx_field *field)
+{
+       struct lttng_ctx *ctx;
+
+       ctx = *ctx_p;
+       ctx->nr_fields--;
+       memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
+}
+EXPORT_SYMBOL_GPL(lttng_remove_context_field);
+
 void lttng_destroy_context(struct lttng_ctx *ctx)
 {
        int i;
index 1314c663e77e321fc32bc638d0183bacc74f727a..f8bee492c3daae003dc7702aaf638ad3af99d0d3 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include "wrapper/vmalloc.h"   /* for wrapper_vmalloc_sync_all() */
 #include "wrapper/ringbuffer/vfs.h"
+#include "wrapper/poll.h"
 #include "ltt-debugfs-abi.h"
 #include "ltt-events.h"
 #include "ltt-tracer.h"
@@ -551,8 +552,6 @@ long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg
        }
 }
 
-/* TODO: poll */
-#if 0
 /**
  *     lttng_channel_poll - lttng stream addition/removal monitoring
  *
@@ -565,11 +564,13 @@ unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
        unsigned int mask = 0;
 
        if (file->f_mode & FMODE_READ) {
-               poll_wait_set_exclusive(wait);
-               poll_wait(file, &channel->notify_wait, wait);
+               init_poll_funcptr(wait, wrapper_pollwait_exclusive);
+               poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
+                         wait);
 
-               /* TODO: identify when the channel is being finalized. */
-               if (finalized)
+               if (channel->ops->is_disabled(channel->chan))
+                       return POLLERR;
+               if (channel->ops->is_finalized(channel->chan))
                        return POLLHUP;
                else
                        return POLLIN | POLLRDNORM;
@@ -577,7 +578,6 @@ unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
        return mask;
 
 }
-#endif //0
 
 static
 int lttng_channel_release(struct inode *inode, struct file *file)
@@ -591,10 +591,7 @@ int lttng_channel_release(struct inode *inode, struct file *file)
 
 static const struct file_operations lttng_channel_fops = {
        .release = lttng_channel_release,
-/* TODO */
-#if 0
        .poll = lttng_channel_poll,
-#endif //0
        .unlocked_ioctl = lttng_channel_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = lttng_channel_ioctl,
index e3c252d33962b52cdf3e86b90233bec53e2c92f9..40e8e459183efacae2744fcc6a6071bd9625e5b1 100644 (file)
@@ -169,7 +169,6 @@ struct ltt_channel *ltt_channel_create(struct ltt_session *session,
        if (!chan)
                goto nomem;
        chan->session = session;
-       init_waitqueue_head(&chan->notify_wait);
        chan->id = session->free_chan_id++;
        /*
         * Note: the channel creation op already writes into the packet
@@ -385,7 +384,7 @@ int lttng_metadata_printf(struct ltt_session *session,
                 * we need to bail out after timeout or being
                 * interrupted.
                 */
-               waitret = wait_event_interruptible_timeout(*chan->ops->get_reader_wait_queue(chan),
+               waitret = wait_event_interruptible_timeout(*chan->ops->get_reader_wait_queue(chan->chan),
                        ({
                                ret = chan->ops->event_reserve(&ctx, 0);
                                ret != -ENOBUFS || !ret;
index 6244e8c5a49df5427226f8d9d8163a9a237d5337..05eff540726defe0b8e14d3a8c28785e65aef8bc 100644 (file)
@@ -128,7 +128,8 @@ struct lttng_ctx_field {
        union {
                struct {
                        struct perf_event **e;  /* per-cpu array */
-                       struct list_head head;
+                       struct notifier_block nb;
+                       int hp_enable;
                        struct perf_event_attr *attr;
                } perf_counter;
        } u;
@@ -201,7 +202,10 @@ struct ltt_channel_ops {
         * may change due to concurrent writes.
         */
        size_t (*packet_avail_size)(struct channel *chan);
-       wait_queue_head_t *(*get_reader_wait_queue)(struct ltt_channel *chan);
+       wait_queue_head_t *(*get_reader_wait_queue)(struct channel *chan);
+       wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
+       int (*is_finalized)(struct channel *chan);
+       int (*is_disabled)(struct channel *chan);
 };
 
 struct ltt_channel {
@@ -213,7 +217,6 @@ struct ltt_channel {
        struct file *file;              /* File associated to channel */
        unsigned int free_event_id;     /* Next event ID to allocate */
        struct list_head list;          /* Channel list */
-       wait_queue_head_t notify_wait;  /* Channel addition notif. waitqueue */
        struct ltt_channel_ops *ops;
        int header_type;                /* 0: unset, 1: compact, 2: large */
        int metadata_dumped:1;
@@ -273,6 +276,8 @@ void ltt_event_put(const struct lttng_event_desc *desc);
 int ltt_probes_init(void);
 void ltt_probes_exit(void);
 struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
+void lttng_remove_context_field(struct lttng_ctx **ctx,
+                               struct lttng_ctx_field *field);
 void lttng_destroy_context(struct lttng_ctx *ctx);
 int lttng_add_pid_to_ctx(struct lttng_ctx **ctx);
 int lttng_add_comm_to_ctx(struct lttng_ctx **ctx);
index 1c9308e04f7151e9d38db7aeb83609bdca8c51c3..0d8051ece35bd5ec2e42e3641524b1c5c6a993fb 100644 (file)
@@ -455,9 +455,27 @@ void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
 }
 
 static
-wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
 {
-       return &chan->chan->read_wait;
+       return &chan->read_wait;
+}
+
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+       return &chan->hp_wait;
+}
+
+static
+int ltt_is_finalized(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int ltt_is_disabled(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_disabled(chan);
 }
 
 static struct ltt_transport ltt_relay_transport = {
@@ -473,6 +491,9 @@ static struct ltt_transport ltt_relay_transport = {
                .event_write = ltt_event_write,
                .packet_avail_size = NULL,      /* Would be racy anyway */
                .get_reader_wait_queue = ltt_get_reader_wait_queue,
+               .get_hp_wait_queue = ltt_get_hp_wait_queue,
+               .is_finalized = ltt_is_finalized,
+               .is_disabled = ltt_is_disabled,
        },
 };
 
index a5ce2062652b9681394711fb48436569b4b20c27..8b1079d98c7c930dced941601533b9a407a272d1 100644 (file)
@@ -220,9 +220,27 @@ size_t ltt_packet_avail_size(struct channel *chan)
 }
 
 static
-wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
 {
-       return &chan->chan->read_wait;
+       return &chan->read_wait;
+}
+
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+       return &chan->hp_wait;
+}
+
+static
+int ltt_is_finalized(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int ltt_is_disabled(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_disabled(chan);
 }
 
 static struct ltt_transport ltt_relay_transport = {
@@ -238,6 +256,9 @@ static struct ltt_transport ltt_relay_transport = {
                .event_write = ltt_event_write,
                .packet_avail_size = ltt_packet_avail_size,
                .get_reader_wait_queue = ltt_get_reader_wait_queue,
+               .get_hp_wait_queue = ltt_get_hp_wait_queue,
+               .is_finalized = ltt_is_finalized,
+               .is_disabled = ltt_is_disabled,
        },
 };
 
index 20d49d580760b8c4ed8cd15ce239bb6c2d77b17b..56e0d25035ae7f6a09eec09114ced55524b81159 100644 (file)
 #include "wrapper/vmalloc.h"
 #include "ltt-tracer.h"
 
-/*
- * TODO: Add CPU hotplug support.
- */
-
-static DEFINE_MUTEX(perf_counter_mutex);
-static LIST_HEAD(perf_counter_contexts);
-
 static
 size_t perf_counter_get_size(size_t offset)
 {
@@ -43,8 +36,19 @@ void perf_counter_record(struct lttng_ctx_field *field,
        uint64_t value;
 
        event = field->u.perf_counter.e[ctx->cpu];
-       event->pmu->read(event);
-       value = local64_read(&event->count);
+       if (likely(event)) {
+               event->pmu->read(event);
+               value = local64_read(&event->count);
+       } else {
+               /*
+                * Perf chooses not to be clever and not to support enabling a
+                * perf counter before the cpu is brought up. Therefore, we need
+                * to support having events coming (e.g. scheduler events)
+                * before the counter is setup. Write an arbitrary 0 in this
+                * case.
+                */
+               value = 0;
+       }
        lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
        chan->ops->event_write(ctx, &value, sizeof(value));
 }
@@ -62,16 +66,71 @@ void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
        struct perf_event **events = field->u.perf_counter.e;
        int cpu;
 
-       mutex_lock(&perf_counter_mutex);
-       list_del(&field->u.perf_counter.head);
+       get_online_cpus();
        for_each_online_cpu(cpu)
                perf_event_release_kernel(events[cpu]);
-       mutex_unlock(&perf_counter_mutex);
+       put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+       unregister_cpu_notifier(&field->u.perf_counter.nb);
+#endif
        kfree(field->event_field.name);
        kfree(field->u.perf_counter.attr);
        kfree(events);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+/**
+ *	lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
+ *     @nb: notifier block
+ *     @action: hotplug action to take
+ *     @hcpu: CPU number
+ *
+ *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
+ *
+ * We can set up perf counters when the cpu is online (up prepare seems to be
+ * too soon).
+ */
+static
+int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
+                                                unsigned long action,
+                                                void *hcpu)
+{
+       unsigned int cpu = (unsigned long) hcpu;
+       struct lttng_ctx_field *field =
+               container_of(nb, struct lttng_ctx_field, u.perf_counter.nb);
+       struct perf_event **events = field->u.perf_counter.e;
+       struct perf_event_attr *attr = field->u.perf_counter.attr;
+       struct perf_event *pevent;
+
+       if (!field->u.perf_counter.hp_enable)
+               return NOTIFY_OK;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               pevent = perf_event_create_kernel_counter(attr,
+                               cpu, NULL, overflow_callback);
+               if (!pevent)
+                       return NOTIFY_BAD;
+               barrier();      /* Create perf counter before setting event */
+               events[cpu] = pevent;
+               break;
+       case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               pevent = events[cpu];
+               events[cpu] = NULL;
+               barrier();      /* NULLify event before perf counter teardown */
+               perf_event_release_kernel(pevent);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+#endif
+
 int lttng_add_perf_counter_to_ctx(uint32_t type,
                                  uint64_t config,
                                  const char *name,
@@ -100,17 +159,6 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
        attr->pinned = 1;
        attr->disabled = 0;
 
-       mutex_lock(&perf_counter_mutex);
-
-       for_each_online_cpu(cpu) {
-               events[cpu] = perf_event_create_kernel_counter(attr,
-                                       cpu, NULL, overflow_callback);
-               if (!events[cpu]) {
-                       ret = -EINVAL;
-                       goto name_alloc_error;
-               }
-       }
-
        name_alloc = kstrdup(name, GFP_KERNEL);
        if (!name_alloc) {
                ret = -ENOMEM;
@@ -120,8 +168,27 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
        field = lttng_append_context(ctx);
        if (!field) {
                ret = -ENOMEM;
-               goto error;
+               goto append_context_error;
+       }
+
+#ifdef CONFIG_HOTPLUG_CPU
+       field->u.perf_counter.nb.notifier_call =
+               lttng_perf_counter_cpu_hp_callback;
+       field->u.perf_counter.nb.priority = 0;
+       register_cpu_notifier(&field->u.perf_counter.nb);
+#endif
+
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               events[cpu] = perf_event_create_kernel_counter(attr,
+                                       cpu, NULL, overflow_callback);
+               if (!events[cpu]) {
+                       ret = -EINVAL;
+                       goto counter_error;
+               }
        }
+       put_online_cpus();
+
        field->destroy = lttng_destroy_perf_counter_field;
 
        field->event_field.name = name_alloc;
@@ -136,21 +203,24 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
        field->record = perf_counter_record;
        field->u.perf_counter.e = events;
        field->u.perf_counter.attr = attr;
-
-       list_add(&field->u.perf_counter.head, &perf_counter_contexts);
-       mutex_unlock(&perf_counter_mutex);
+       field->u.perf_counter.hp_enable = 1;
 
        wrapper_vmalloc_sync_all();
        return 0;
 
-error:
-       kfree(name_alloc);
-name_alloc_error:
+counter_error:
        for_each_online_cpu(cpu) {
                if (events[cpu])
                        perf_event_release_kernel(events[cpu]);
        }
-       mutex_unlock(&perf_counter_mutex);
+       put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+       unregister_cpu_notifier(&field->u.perf_counter.nb);
+#endif
+       lttng_remove_context_field(ctx, field);
+append_context_error:
+       kfree(name_alloc);
+name_alloc_error:
        kfree(attr);
 error_attr:
        kfree(events);
diff --git a/wrapper/poll.c b/wrapper/poll.c
new file mode 100644 (file)
index 0000000..f3f76fe
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
+ *
+ * wrapper around poll __pollwait and poll_get_entry. Using KALLSYMS to get
+ * their addresses when available, else we need to have a kernel that exports
+ * these functions to GPL modules.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+#ifdef CONFIG_KALLSYMS
+
+#include <linux/kallsyms.h>
+#include <linux/poll.h>
+
+struct poll_table_entry;
+struct splice_pipe_desc;
+
+static
+void (*__pollwait_sym)(struct file *filp, wait_queue_head_t *wait_address,
+               poll_table *p);
+static
+struct poll_table_entry *(*poll_get_entry_sym)(struct poll_wqueues *p);
+
+void wrapper_pollwait_exclusive(struct file *filp,
+                        wait_queue_head_t *wait_address,
+                        poll_table *p)
+
+{
+       struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
+       struct poll_table_entry *entry;
+
+       if (!poll_get_entry_sym)
+               poll_get_entry_sym = (void *) kallsyms_lookup_name("poll_get_entry");
+       if (!poll_get_entry_sym) {
+               printk(KERN_WARNING "LTTng: poll_get_entry_sym symbol lookup failed.\n");
+               return;
+       }
+       entry = poll_get_entry_sym(pwq);
+
+       if (!__pollwait_sym)
+               __pollwait_sym = (void *) kallsyms_lookup_name("__pollwait");
+       if (!__pollwait_sym) {
+               printk(KERN_WARNING "LTTng: __pollwait symbol lookup failed.\n");
+               return;
+       }
+       return __pollwait_sym(filp, wait_address, p);
+}
+
+#else
+
+#include <linux/poll.h>
+
+ssize_t wrapper_pollwait_exclusive(struct file *filp,
+                       wait_queue_head_t *wait_address,
+                       poll_table *p)
+{
+       return pollwait_exclusive(filp, wait_address, p);
+}
+
+#endif
index 416ce5740da7a6e15b88401e209c49dc54860310..ae524730cda3e184c34293e69d8bd7beec4fa5d3 100644 (file)
@@ -1,20 +1,20 @@
+#ifndef _LTTNG_WRAPPER_POLL_H
+#define _LTTNG_WRAPPER_POLL_H
+
 /*
- * wrapper/poll.h
+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
  *
- * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * wrapper around poll __pollwait and poll_get_entry. Using KALLSYMS to get
+ * their addresses when available, else we need to have a kernel that exports
+ * these functions to GPL modules.
  *
  * Dual LGPL v2.1/GPL v2 license.
  */
 
-#ifndef CONFIG_LIB_RING_BUFFER
 #include <linux/poll.h>
 
-#warning "poll_wait_set_exclusive() is defined as no-op. Will increase LTTng overhead. Please consider using the LTTng kernel tree for better results."
-
-/*
- * Will cause higher overhead when signalling all possible reader threads when a
- * buffer is ready to be consumed.
- */
-#define poll_wait_set_exclusive(poll_table)
+void wrapper_pollwait_exclusive(struct file *filp,
+                        wait_queue_head_t *wait_address,
+                        poll_table *p);
 
-#endif
+#endif /* _LTTNG_WRAPPER_POLL_H */
diff --git a/wrapper/splice.c b/wrapper/splice.c
new file mode 100644 (file)
index 0000000..edc499c
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
+ *
+ * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
+ * available, else we need to have a kernel that exports this function to GPL
+ * modules.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+
+#ifdef CONFIG_KALLSYMS
+
+#include <linux/kallsyms.h>
+#include <linux/fs.h>
+#include <linux/splice.h>
+
+static
+ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
+                             struct splice_pipe_desc *spd);
+
+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
+                              struct splice_pipe_desc *spd)
+{
+       if (!splice_to_pipe_sym)
+               splice_to_pipe_sym = (void *) kallsyms_lookup_name("splice_to_pipe");
+       if (splice_to_pipe_sym) {
+               return splice_to_pipe_sym(pipe, spd);
+       } else {
+               printk(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
+               return -ENOSYS;
+       }
+}
+
+#else
+
+#include <linux/fs.h>
+#include <linux/splice.h>
+
+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
+                              struct splice_pipe_desc *spd)
+{
+       return splice_to_pipe(pipe, spd);
+}
+
+#endif
index 5f400747cb1bcb5c3c5070884a330b8de3a3fb3b..d419847d92578e031347b5ffc6eaba85f2757cdd 100644 (file)
  * Dual LGPL v2.1/GPL v2 license.
  */
 
-#ifdef CONFIG_KALLSYMS
-
-#include <linux/kallsyms.h>
-#include <linux/splice.h>
-
-static inline
-ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
-                              struct splice_pipe_desc *spd)
-{
-       ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
-                                     struct splice_pipe_desc *spd);
-
-       splice_to_pipe_sym = (void *) kallsyms_lookup_name("splice_to_pipe");
-       if (splice_to_pipe_sym) {
-               return splice_to_pipe_sym(pipe, spd);
-       } else {
-               printk(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
-               return -ENOSYS;
-       }
-}
-#else
-
 #include <linux/splice.h>
 
-static inline
 ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
-                              struct splice_pipe_desc *spd)
-{
-       return splice_to_pipe(pipe, spd);
-}
-#endif
+                              struct splice_pipe_desc *spd);
 
 #endif /* _LTT_WRAPPER_SPLICE_H */
This page took 0.037956 seconds and 4 git commands to generate.