* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
+#include <stdbool.h>
#include <sys/mman.h>
#include <sys/syscall.h>
-#include <linux/perf_event.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-config.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
+#include <urcu/tls-compat.h>
+#include "perf_event.h"
#include "lttng-tracer-core.h"
/*
struct perf_event_mmap_page *pc;
struct cds_list_head thread_field_node; /* Per-field list of thread fields (node) */
struct cds_list_head rcu_field_node; /* RCU per-thread list of fields (node) */
+ int fd; /* Perf FD */
};
struct lttng_perf_counter_thread {
static pthread_key_t perf_counter_key;
+/*
+ * lttng_perf_lock - Protect lttng-ust perf counter data structures
+ *
+ * Nests within the ust_lock, and therefore within the libc dl lock.
+ * Therefore, we need to fixup the TLS before nesting into this lock.
+ * Nests inside RCU bp read-side lock. Protects against concurrent
+ * fork.
+ */
+static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
+ * restored on unlock. Protected by ust_perf_mutex.
+ */
+static int ust_perf_saved_cancelstate;
+
+/*
+ * Nesting count of the perf mutex for the current thread. It allows
+ * tracing from a signal handler nested over an application thread
+ * already holding the lock without deadlocking on re-acquisition.
+ */
+static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
+
+/*
+ * Force a read of the TLS variables, which implies the TLS fixup
+ * required when the library is loaded through dlopen.
+ */
+void lttng_ust_fixup_perf_counter_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
+}
+
+void lttng_perf_lock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
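+	/*
+	 * Block every signal while the nesting count is updated and the
+	 * mutex is taken, so that no signal handler can run between the
+	 * nesting check and the lock acquisition on this thread.
+	 */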
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_perf_mutex_nest)++) {
+		/*
+		 * Ensure the compiler does not move the store after the
+		 * close() call, in case close() would be marked as a leaf
+		 * function.
+		 */
+ cmm_barrier();
+ pthread_mutex_lock(&ust_perf_mutex);
+ ust_perf_saved_cancelstate = oldstate;
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+}
+
+void lttng_perf_unlock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, newstate, oldstate;
+ bool restore_cancel = false;
+
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+	/*
+	 * Ensure the compiler does not move the store before the close()
+	 * call, in case close() would be marked as a leaf function.
+	 */
+ cmm_barrier();
+ if (!--URCU_TLS(ust_perf_mutex_nest)) {
+ newstate = ust_perf_saved_cancelstate;
+ restore_cancel = true;
+ pthread_mutex_unlock(&ust_perf_mutex);
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (restore_cancel) {
+ ret = pthread_setcancelstate(newstate, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ }
+}
+
static
-size_t perf_counter_get_size(size_t offset)
+size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
size_t size = 0;
return size;
}
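+/*
+ * Read the counter value through the read() system call on the perf
+ * event file descriptor. Assuming the default read_format (neither
+ * PERF_FORMAT_GROUP nor PERF_FORMAT_ID is set), the kernel returns a
+ * single 64-bit count.
+ */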
+static
+uint64_t read_perf_counter_syscall(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ uint64_t count;
+
+ if (caa_unlikely(thread_field->fd < 0))
+ return 0;
+
+	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
+			< (ssize_t) sizeof(count)))
+ return 0;
+
+ return count;
+}
+
#if defined(__x86_64__) || defined(__i386__)
static
return low | ((uint64_t) high) << 32;
}
-#else /* defined(__x86_64__) || defined(__i386__) */
-
-#error "Perf event counters are only supported on x86 so far."
-
-#endif /* #else defined(__x86_64__) || defined(__i386__) */
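+/*
+ * Use the capability bits of the perf mmap page to tell whether rdpmc
+ * may be used: cap_bit0_is_deprecated identifies kernels (3.12 and
+ * later) that expose a dedicated cap_user_rdpmc bit, as opposed to
+ * older kernels where bit 0 was ambiguous and cannot be trusted.
+ */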
+static
+bool has_rdpmc(struct perf_event_mmap_page *pc)
+{
+ if (caa_unlikely(!pc->cap_bit0_is_deprecated))
+ return false;
+ /* Since Linux kernel 3.12. */
+ return pc->cap_user_rdpmc;
+}
static
-uint64_t read_perf_counter(struct perf_event_mmap_page *pc)
+uint64_t arch_read_perf_counter(
+ struct lttng_perf_counter_thread_field *thread_field)
{
uint32_t seq, idx;
uint64_t count;
+ struct perf_event_mmap_page *pc = thread_field->pc;
if (caa_unlikely(!pc))
return 0;
cmm_barrier();
idx = pc->index;
- if (idx)
- count = pc->offset + rdpmc(idx - 1);
- else
- count = 0;
-
+ if (caa_likely(has_rdpmc(pc) && idx)) {
+ int64_t pmcval;
+
+ pmcval = rdpmc(idx - 1);
+ /* Sign-extend the pmc register result. */
+ pmcval <<= 64 - pc->pmc_width;
+ pmcval >>= 64 - pc->pmc_width;
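+		/*
+		 * e.g. with pc->pmc_width == 48, shifting left then right
+		 * by 16 bits propagates bit 47 through the upper bits, so
+		 * the register value is interpreted as a signed 48-bit
+		 * quantity before being added to pc->offset.
+		 */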
+ count = pc->offset + pmcval;
+ } else {
+		/* Fall back on the read() system call if rdpmc cannot be used. */
+ return read_perf_counter_syscall(thread_field);
+ }
cmm_barrier();
} while (CMM_LOAD_SHARED(pc->lock) != seq);
return count;
}
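+/*
+ * arch_perf_keep_fd() reports whether the perf event file descriptor
+ * must stay open for the read() fall-back. When rdpmc can be used,
+ * setup_perf() closes the descriptor and relies on the mmap'd page
+ * alone.
+ */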
+static
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+{
+ struct perf_event_mmap_page *pc = thread_field->pc;
+
+ if (!pc)
+ return 0;
+ return !has_rdpmc(pc);
+}
+
+#else /* defined(__x86_64__) || defined(__i386__) */
+
+/* Generic (slow) implementation using a read system call. */
+static
+uint64_t arch_read_perf_counter(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ return read_perf_counter_syscall(thread_field);
+}
+
+static
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+{
+ return 1;
+}
+
+#endif /* #else defined(__x86_64__) || defined(__i386__) */
+
static
int sys_perf_event_open(struct perf_event_attr *attr,
pid_t pid, int cpu, int group_fd,
}
static
-struct perf_event_mmap_page *setup_perf(struct perf_event_attr *attr)
+int open_perf_fd(struct perf_event_attr *attr)
{
- void *perf_addr;
int fd;
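+	/* pid = 0, cpu = -1: count events for the calling thread on any CPU. */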
fd = sys_perf_event_open(attr, 0, -1, -1, 0);
if (fd < 0)
- return NULL;
+ return -1;
+
+ return fd;
+}
+
+static
+void close_perf_fd(int fd)
+{
+ int ret;
+
+ if (fd < 0)
+ return;
+
+ ret = close(fd);
+ if (ret) {
+		perror("Error closing LTTng-UST perf event FD");
+ }
+}
+
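+/*
+ * Map the first page of the perf event file descriptor: it contains
+ * the perf_event_mmap_page self-monitoring header (index, offset,
+ * pmc_width and capability bits) used by the rdpmc fast path.
+ */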
+static
+void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
+{
+ void *perf_addr;
perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
- PROT_READ, MAP_SHARED, fd, 0);
+ PROT_READ, MAP_SHARED, thread_field->fd, 0);
if (perf_addr == MAP_FAILED)
- return NULL;
- close(fd);
- return perf_addr;
+ perf_addr = NULL;
+ thread_field->pc = perf_addr;
+
+ if (!arch_perf_keep_fd(thread_field)) {
+ close_perf_fd(thread_field->fd);
+ thread_field->fd = -1;
+ }
}
static
if (!thread_field)
abort();
thread_field->field = perf_field;
- thread_field->pc = setup_perf(&perf_field->attr);
- /* Note: thread_field->pc can be NULL if setup_perf() fails. */
- ust_lock_nocheck();
+ thread_field->fd = open_perf_fd(&perf_field->attr);
+ if (thread_field->fd >= 0)
+ setup_perf(thread_field);
+ /*
+ * Note: thread_field->pc can be NULL if setup_perf() fails.
+ * Also, thread_field->fd can be -1 if open_perf_fd() fails.
+ */
+ lttng_perf_lock();
cds_list_add_rcu(&thread_field->rcu_field_node,
&perf_thread->rcu_field_list);
cds_list_add(&thread_field->thread_field_node,
&perf_field->thread_field_list);
- ust_unlock();
+ lttng_perf_unlock();
skip:
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
if (ret)
perf_field = field->u.perf_counter;
perf_thread_field = get_thread_field(perf_field);
- return read_perf_counter(perf_thread_field->pc);
+ return arch_read_perf_counter(perf_thread_field);
}
static
static
void perf_counter_get_value(struct lttng_ctx_field *field,
- union lttng_ctx_value *value)
+ struct lttng_ctx_value *value)
{
- uint64_t v;
-
- v = wrapper_perf_counter_read(field);
- value->s64 = v;
+ value->u.s64 = wrapper_perf_counter_read(field);
}
-/* Called with UST lock held */
+/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
struct lttng_perf_counter_thread_field *thread_field)
{
+ close_perf_fd(thread_field->fd);
unmap_perf_page(thread_field->pc);
cds_list_del_rcu(&thread_field->rcu_field_node);
cds_list_del(&thread_field->thread_field_node);
struct lttng_perf_counter_thread *perf_thread = _key;
struct lttng_perf_counter_thread_field *pos, *p;
- ust_lock_nocheck();
+ lttng_perf_lock();
cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
rcu_field_node)
lttng_destroy_perf_thread_field(pos);
- ust_unlock();
+ lttng_perf_unlock();
free(perf_thread);
}
/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace
- * period.
+ * period. Holding the lttng perf lock protects against
+ * concurrent modification of the per-thread thread field
+ * list.
*/
+ lttng_perf_lock();
cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
thread_field_node)
lttng_destroy_perf_thread_field(pos);
+ lttng_perf_unlock();
free(perf_field);
}
+#ifdef __ARM_ARCH_7A__
+
+static
+int perf_get_exclude_kernel(void)
+{
+ return 0;
+}
+
+#else /* __ARM_ARCH_7A__ */
+
+static
+int perf_get_exclude_kernel(void)
+{
+ return 1;
+}
+
+#endif /* __ARM_ARCH_7A__ */
+
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
uint64_t config,
{
struct lttng_ctx_field *field;
struct lttng_perf_counter_field *perf_field;
- struct perf_event_mmap_page *tmp_pc;
char *name_alloc;
int ret;
perf_field->attr.type = type;
perf_field->attr.config = config;
- perf_field->attr.exclude_kernel = 1;
+ perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
field->u.perf_counter = perf_field;
/* Ensure that this perf counter can be used in this process. */
- tmp_pc = setup_perf(&perf_field->attr);
- if (!tmp_pc) {
+ ret = open_perf_fd(&perf_field->attr);
+ if (ret < 0) {
ret = -ENODEV;
goto setup_error;
}
- unmap_perf_page(tmp_pc);
+ close_perf_fd(ret);
/*
* Contexts can only be added before tracing is started, so we
* the field here.
*/
+ lttng_context_update(*ctx);
return 0;
setup_error: