+ switch (tracker_type) {
+ case TRACKER_PID:
+ return &session->pid_tracker;
+ case TRACKER_VPID:
+ return &session->vpid_tracker;
+ case TRACKER_UID:
+ return &session->uid_tracker;
+ case TRACKER_VUID:
+ return &session->vuid_tracker;
+ case TRACKER_GID:
+ return &session->gid_tracker;
+ case TRACKER_VGID:
+ return &session->vgid_tracker;
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+}
+
+/*
+ * Add @id to the tracker of the given type, or destroy the tracker
+ * (meaning "track everything") when @id is -1.
+ * Returns 0 on success, negative error code on failure.
+ */
+int lttng_session_track_id(struct lttng_session *session,
+ enum tracker_type tracker_type, int id)
+{
+ struct lttng_id_tracker *tracker = get_tracker(session, tracker_type);
+ int err = -EINVAL;
+
+ if (!tracker || id < -1)
+  return err;
+ mutex_lock(&sessions_mutex);
+ if (id == -1) {
+  /* track all ids: destroy tracker. */
+  lttng_id_tracker_destroy(tracker, true);
+  err = 0;
+ } else {
+  err = lttng_id_tracker_add(tracker, id);
+ }
+ mutex_unlock(&sessions_mutex);
+ return err;
+}
+
+/*
+ * Remove @id from the tracker of the given type, or replace the
+ * tracker by an empty set (meaning "track nothing") when @id is -1.
+ * Returns 0 on success, negative error code on failure.
+ */
+int lttng_session_untrack_id(struct lttng_session *session,
+ enum tracker_type tracker_type, int id)
+{
+ struct lttng_id_tracker *tracker = get_tracker(session, tracker_type);
+ int err;
+
+ if (!tracker || id < -1)
+  return -EINVAL;
+ mutex_lock(&sessions_mutex);
+ err = (id == -1)
+  ? lttng_id_tracker_empty_set(tracker) /* untrack all ids */
+  : lttng_id_tracker_del(tracker, id);
+ mutex_unlock(&sessions_mutex);
+ return err;
+}
+
+/*
+ * seq_file "start" operation for the tracker id listing.
+ * Takes sessions_mutex (released in id_list_stop) and returns the
+ * hash-table node at logical position *pos, or NULL at end of list.
+ * NOTE(review): when the tracker is disabled (id_tracker->p == NULL)
+ * the "empty tracker" branch returns that NULL pointer, which seq_file
+ * also interprets as end-of-iteration — confirm this matches the
+ * intended "track all / show -1" output path in id_list_show.
+ */
+static
+void *id_list_start(struct seq_file *m, loff_t *pos)
+{
+ struct lttng_id_tracker *id_tracker = m->private;
+ struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
+ struct lttng_id_hash_node *e;
+ int iter = 0, i;
+
+ mutex_lock(&sessions_mutex);
+ if (id_tracker_p) {
+ /* Walk every hash bucket, counting entries until *pos is reached. */
+ for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
+ struct hlist_head *head = &id_tracker_p->id_hash[i];
+
+ lttng_hlist_for_each_entry(e, head, hlist) {
+ if (iter++ >= *pos)
+ return e;
+ }
+ }
+ } else {
+ /* ID tracker disabled. */
+ if (iter >= *pos && iter == 0) {
+ return id_tracker_p; /* empty tracker */
+ }
+ iter++;
+ }
+ /* End of list */
+ return NULL;
+}
+
+/*
+ * seq_file "next" operation: advance *ppos and return the node at the
+ * new position, or NULL at end of list.
+ * Called with sessions_mutex held (taken in id_list_start).
+ */
+static
+void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
+{
+ struct lttng_id_tracker *id_tracker = m->private;
+ struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
+ struct lttng_id_hash_node *e;
+ int iter = 0, i;
+
+ (*ppos)++;
+ if (id_tracker_p) {
+ /* Re-scan the table from the top to find the entry at *ppos. */
+ for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
+ struct hlist_head *head = &id_tracker_p->id_hash[i];
+
+ lttng_hlist_for_each_entry(e, head, hlist) {
+ if (iter++ >= *ppos)
+ return e;
+ }
+ }
+ } else {
+ /* ID tracker disabled. */
+ if (iter >= *ppos && iter == 0)
+ return p; /* empty tracker */
+ iter++;
+ }
+
+ /* End of list */
+ return NULL;
+}
+
+/* seq_file "stop" operation: release sessions_mutex taken in id_list_start. */
+static
+void id_list_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&sessions_mutex);
+}
+
+/*
+ * seq_file "show" operation: print one tracked id in the tracker's
+ * output syntax. An element equal to id_tracker->p denotes a disabled
+ * tracker, rendered as id -1.
+ */
+static
+int id_list_show(struct seq_file *m, void *p)
+{
+ struct lttng_id_tracker *id_tracker = m->private;
+ struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
+ int id;
+
+ if (p == id_tracker_p) {
+ /* Tracker disabled. */
+ id = -1;
+ } else {
+ const struct lttng_id_hash_node *e = p;
+
+ id = lttng_id_tracker_get_node_id(e);
+ }
+ /* Select the output keyword from the tracker type. */
+ switch (id_tracker->tracker_type) {
+ case TRACKER_PID:
+ seq_printf(m, "process { pid = %d; };\n", id);
+ break;
+ case TRACKER_VPID:
+ seq_printf(m, "process { vpid = %d; };\n", id);
+ break;
+ case TRACKER_UID:
+ seq_printf(m, "user { uid = %d; };\n", id);
+ break;
+ case TRACKER_VUID:
+ seq_printf(m, "user { vuid = %d; };\n", id);
+ break;
+ case TRACKER_GID:
+ seq_printf(m, "group { gid = %d; };\n", id);
+ break;
+ case TRACKER_VGID:
+ seq_printf(m, "group { vgid = %d; };\n", id);
+ break;
+ default:
+ seq_printf(m, "UNKNOWN { field = %d };\n", id);
+ }
+ return 0;
+}
+
+/* seq_file operations backing the per-tracker id listing file. */
+static
+const struct seq_operations lttng_tracker_ids_list_seq_ops = {
+ .start = id_list_start,
+ .next = id_list_next,
+ .stop = id_list_stop,
+ .show = id_list_show,
+};
+
+/*
+ * File open handler: bind the seq_file iterator to the file.
+ * Fix: the seq_ops argument had been corrupted by a character-entity
+ * mangling ("&l" turned into "<"), leaving the invalid token
+ * "<tng_tracker_ids_list_seq_ops"; restore "&lttng_tracker_ids_list_seq_ops".
+ */
+static
+int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &lttng_tracker_ids_list_seq_ops);
+}
+
+/*
+ * File release handler: tear down the seq_file state and, on success,
+ * drop the session file reference taken when the listing file was
+ * created (see the f_count increment in lttng_session_list_tracker_ids).
+ */
+static
+int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct lttng_id_tracker *id_tracker = m->private;
+ int ret;
+
+ WARN_ON_ONCE(!id_tracker);
+ ret = seq_release(inode, file);
+ if (!ret)
+ fput(id_tracker->session->file);
+ return ret;
+}
+
+/* File operations for the tracker id listing file (seq_file based). */
+const struct file_operations lttng_tracker_ids_list_fops = {
+ .owner = THIS_MODULE,
+ .open = lttng_tracker_ids_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = lttng_tracker_ids_list_release,
+};
+
+/*
+ * Create an anonymous-inode file listing the ids tracked by the given
+ * tracker type and install it into the caller's fd table. Holds an
+ * extra reference on the session file for the lifetime of the listing
+ * file (dropped in lttng_tracker_ids_list_release).
+ * Returns the new file descriptor, or a negative error code.
+ * Fix: the fops argument to anon_inode_getfile had been corrupted by a
+ * character-entity mangling ("&l" turned into "<"), leaving the invalid
+ * token "<tng_tracker_ids_list_fops"; restore "&lttng_tracker_ids_list_fops".
+ */
+int lttng_session_list_tracker_ids(struct lttng_session *session,
+ enum tracker_type tracker_type)
+{
+ struct file *tracker_ids_list_file;
+ struct seq_file *m;
+ int file_fd, ret;
+
+ file_fd = lttng_get_unused_fd();
+ if (file_fd < 0) {
+ ret = file_fd;
+ goto fd_error;
+ }
+
+ tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
+ &lttng_tracker_ids_list_fops,
+ NULL, O_RDWR);
+ if (IS_ERR(tracker_ids_list_file)) {
+ ret = PTR_ERR(tracker_ids_list_file);
+ goto file_error;
+ }
+ /* Pin the session file; fails only if the refcount would saturate. */
+ if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+ ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
+ if (ret < 0)
+ goto open_error;
+ m = tracker_ids_list_file->private_data;
+
+ m->private = get_tracker(session, tracker_type);
+ BUG_ON(!m->private);
+ fd_install(file_fd, tracker_ids_list_file);
+
+ return file_fd;
+
+open_error:
+ atomic_long_dec(&session->file->f_count);
+refcount_error:
+ fput(tracker_ids_list_file);
+file_error:
+ put_unused_fd(file_fd);
+fd_error:
+ return ret;
+}
+
+/*
+ * Enabler management.
+ */
+
+/* Return 1 when desc_name matches the star-glob pattern, 0 otherwise. */
+static
+int lttng_match_enabler_star_glob(const char *desc_name,
+ const char *pattern)
+{
+ return strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
+  desc_name, LTTNG_SIZE_MAX) ? 1 : 0;
+}
+
+/* Return 1 when desc_name is exactly equal to name, 0 otherwise. */
+static
+int lttng_match_enabler_name(const char *desc_name,
+ const char *name)
+{
+ return strcmp(desc_name, name) == 0;
+}
+
+/*
+ * Decide whether an event descriptor matches an enabler.
+ * Returns 1 on match, 0 on no match, negative error code on invalid
+ * or unsupported enabler parameters.
+ * For syscall enablers, the descriptor name is first stripped of its
+ * "compat_" and "syscall_entry_"/"syscall_exit_" prefixes, and the
+ * stripped attributes are checked against the enabler's entry/exit
+ * and ABI filters before the name comparison.
+ */
+static
+int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ const char *desc_name, *enabler_name;
+ bool compat = false, entry = false;
+
+ enabler_name = enabler->event_param.name;
+ switch (enabler->event_param.instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ desc_name = desc->name;
+ /* Tracepoints match purely on name (glob or exact). */
+ switch (enabler->type) {
+ case LTTNG_ENABLER_STAR_GLOB:
+ return lttng_match_enabler_star_glob(desc_name, enabler_name);
+ case LTTNG_ENABLER_NAME:
+ return lttng_match_enabler_name(desc_name, enabler_name);
+ default:
+ return -EINVAL;
+ }
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ desc_name = desc->name;
+ /* Strip the compat ABI prefix, remembering it for the ABI filter. */
+ if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
+ desc_name += strlen("compat_");
+ compat = true;
+ }
+ /* Strip the entry/exit prefix, remembering it for the entry/exit filter. */
+ if (!strncmp(desc_name, "syscall_exit_",
+ strlen("syscall_exit_"))) {
+ desc_name += strlen("syscall_exit_");
+ } else if (!strncmp(desc_name, "syscall_entry_",
+ strlen("syscall_entry_"))) {
+ desc_name += strlen("syscall_entry_");
+ entry = true;
+ } else {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ /* Reject descriptors on the wrong side of the entry/exit filter. */
+ switch (enabler->event_param.u.syscall.entryexit) {
+ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
+ break;
+ case LTTNG_KERNEL_SYSCALL_ENTRY:
+ if (!entry)
+ return 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL_EXIT:
+ if (entry)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Reject descriptors of the wrong ABI. */
+ switch (enabler->event_param.u.syscall.abi) {
+ case LTTNG_KERNEL_SYSCALL_ABI_ALL:
+ break;
+ case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
+ if (compat)
+ return 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
+ if (!compat)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Finally match on the stripped syscall name. */
+ switch (enabler->event_param.u.syscall.match) {
+ case LTTNG_SYSCALL_MATCH_NAME:
+ switch (enabler->type) {
+ case LTTNG_ENABLER_STAR_GLOB:
+ return lttng_match_enabler_star_glob(desc_name, enabler_name);
+ case LTTNG_ENABLER_NAME:
+ return lttng_match_enabler_name(desc_name, enabler_name);
+ default:
+ return -EINVAL;
+ }
+ break;
+ case LTTNG_SYSCALL_MATCH_NR:
+ return -EINVAL; /* Not implemented. */
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+}
+
+/*
+ * Return 1 when the event matches the enabler: same instrumentation
+ * type, matching descriptor, and same channel. Return 0 otherwise.
+ */
+static
+int lttng_event_match_enabler(struct lttng_event *event,
+ struct lttng_enabler *enabler)
+{
+ if (enabler->event_param.instrumentation != event->instrumentation)
+  return 0;
+ return lttng_desc_match_enabler(event->desc, enabler)
+  && event->chan == enabler->chan;
+}
+
+/*
+ * Look up the backward reference from @event to @enabler.
+ * Returns the reference node, or NULL if the event holds none.
+ */
+static
+struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
+ struct lttng_enabler *enabler)
+{
+ struct lttng_enabler_ref *ref;
+
+ list_for_each_entry(ref, &event->enablers_ref_head, node) {
+  if (ref->ref == enabler)
+   return ref;
+ }
+ return NULL;
+}
+
+/*
+ * Walk every registered probe descriptor and, for each event
+ * description that matches @enabler, create the corresponding
+ * lttng_event on the enabler's channel unless one already exists.
+ * Creation failures are logged and otherwise ignored (best effort).
+ */
+static
+void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
+{
+ struct lttng_session *session = enabler->chan->session;
+ struct lttng_probe_desc *probe_desc;
+ const struct lttng_event_desc *desc;
+ int i;
+ struct list_head *probe_list;
+
+ probe_list = lttng_get_probe_list_head();
+ /*
+ * For each probe event, if we find that a probe event matches
+ * our enabler, create an associated lttng_event if not
+ * already present.
+ */
+ list_for_each_entry(probe_desc, probe_list, head) {
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int found = 0;
+ struct hlist_head *head;
+ const char *event_name;
+ size_t name_len;
+ uint32_t hash;
+ struct lttng_event *event;
+
+ desc = probe_desc->event_desc[i];
+ if (!lttng_desc_match_enabler(desc, enabler))
+ continue;
+ event_name = desc->name;
+ name_len = strlen(event_name);
+
+ /*
+ * Check if already created.
+ */
+ hash = jhash(event_name, name_len, 0);
+ /* Look up the session's event hash table bucket by name hash. */
+ head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+ lttng_hlist_for_each_entry(event, head, hlist) {
+ if (event->desc == desc
+ && event->chan == enabler->chan)
+ found = 1;
+ }
+ if (found)
+ continue;
+
+ /*
+ * We need to create an event for this
+ * event probe.
+ */
+ event = _lttng_event_create(enabler->chan,
+ NULL, NULL, desc,
+ LTTNG_KERNEL_TRACEPOINT);
+ if (!event) {
+ printk(KERN_INFO "Unable to create event %s\n",
+ probe_desc->event_desc[i]->name);
+ }
+ }
+ }
+}
+
+/*
+ * Register the syscall probes on the enabler's channel; warn once
+ * if registration reports an error.
+ */
+static
+void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
+{
+ WARN_ON_ONCE(lttng_syscalls_register(enabler->chan, NULL));
+}
+
+/*
+ * Create struct lttng_event if it is missing and present in the list of
+ * tracepoint probes.
+ * Should be called with sessions mutex held.
+ * Dispatches on the enabler's instrumentation type; unknown types
+ * trigger a one-time warning and are ignored.
+ */
+static
+void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+{
+ switch (enabler->event_param.instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ lttng_create_tracepoint_if_missing(enabler);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ lttng_create_syscall_if_missing(enabler);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+/*
+ * Create events associated with an enabler (if not already present),
+ * and add backward reference from the event to the enabler.
+ * Should be called with sessions mutex held.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ * NOTE(review): on -ENOMEM the events already processed keep their
+ * newly added enabler references — partial state is left in place.
+ */
+static
+int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+{
+ struct lttng_channel *chan = enabler->chan;
+ struct lttng_session *session = chan->session;
+ struct lttng_event *event;
+
+ /*
+ * The wildcard "enable all syscalls" enabler is reflected in the
+ * channel's syscall_all flag, following the enabler's own state.
+ */
+ if (enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
+ enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
+ enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
+ enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
+ !strcmp(enabler->event_param.name, "*")) {
+ if (enabler->enabled)
+ WRITE_ONCE(chan->syscall_all, 1);
+ else
+ WRITE_ONCE(chan->syscall_all, 0);
+ }
+
+ /* First ensure that probe events are created for this enabler. */
+ lttng_create_event_if_missing(enabler);
+
+ /* For each event matching enabler in session event list. */
+ list_for_each_entry(event, &session->events, list) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_event_match_enabler(event, enabler))
+ continue;
+ enabler_ref = lttng_event_enabler_ref(event, enabler);
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from event to enabler.
+ */
+ enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
+ if (!enabler_ref)
+ return -ENOMEM;
+ enabler_ref->ref = enabler;
+ list_add(&enabler_ref->node,
+ &event->enablers_ref_head);
+ }
+
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_event_link_bytecode(event, enabler);
+
+ /* TODO: merge event context. */
+ }
+ return 0;
+}
+
+/*
+ * Called at module load: connect the probe on all enablers matching
+ * this event.
+ * Called with sessions lock held.
+ */
+int lttng_fix_pending_events(void)
+{
+ struct lttng_session *iter;
+
+ /* Re-evaluate every session's enablers against the new probes. */
+ list_for_each_entry(iter, &sessions, list)
+  lttng_session_lazy_sync_enablers(iter);
+ return 0;
+}
+
+/*
+ * Allocate an enabler of the given type for @event_param on @chan,
+ * link it into the owning session's enabler list, and sync the
+ * session. The enabler starts disabled.
+ * Returns the new enabler, or NULL on allocation failure.
+ */
+struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+ struct lttng_kernel_event *event_param,
+ struct lttng_channel *chan)
+{
+ struct lttng_enabler *enabler;
+
+ enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
+ if (!enabler)
+ return NULL;
+ enabler->type = type;
+ INIT_LIST_HEAD(&enabler->filter_bytecode_head);
+ memcpy(&enabler->event_param, event_param,
+ sizeof(enabler->event_param));
+ enabler->chan = chan;
+ /* ctx left NULL */
+ enabler->enabled = 0;
+ enabler->evtype = LTTNG_TYPE_ENABLER;
+ /* Publish the enabler under the sessions mutex, then sync. */
+ mutex_lock(&sessions_mutex);
+ list_add(&enabler->node, &enabler->chan->session->enablers_head);
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ mutex_unlock(&sessions_mutex);
+ return enabler;
+}
+
+/* Flip the enabler on and re-sync the owning session. Always returns 0. */
+int lttng_enabler_enable(struct lttng_enabler *enabler)
+{
+ struct lttng_session *session = enabler->chan->session;
+
+ mutex_lock(&sessions_mutex);
+ enabler->enabled = 1;
+ lttng_session_lazy_sync_enablers(session);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+/* Flip the enabler off and re-sync the owning session. Always returns 0. */
+int lttng_enabler_disable(struct lttng_enabler *enabler)
+{
+ struct lttng_session *session = enabler->chan->session;
+
+ mutex_lock(&sessions_mutex);
+ enabler->enabled = 0;
+ lttng_session_lazy_sync_enablers(session);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+/*
+ * Copy a filter bytecode from user space and append it to the
+ * enabler's bytecode list, then re-sync the session.
+ * Returns 0 on success, -ENOMEM / -EFAULT on failure.
+ * Fix: copy_from_user() returns the number of bytes left uncopied;
+ * the previous code returned that positive residue to the caller as
+ * if it were an error code. Map it to -EFAULT per kernel convention.
+ */
+int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+ struct lttng_filter_bytecode_node *bytecode_node;
+ uint32_t bytecode_len;
+ int ret;
+
+ ret = get_user(bytecode_len, &bytecode->len);
+ if (ret)
+ return ret;
+ bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
+ GFP_KERNEL);
+ if (!bytecode_node)
+ return -ENOMEM;
+ if (copy_from_user(&bytecode_node->bc, bytecode,
+ sizeof(*bytecode) + bytecode_len)) {
+ ret = -EFAULT;
+ goto error_free;
+ }
+ bytecode_node->enabler = enabler;
+ /* Enforce length based on allocated size */
+ bytecode_node->bc.len = bytecode_len;
+ list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ return 0;
+
+error_free:
+ kfree(bytecode_node);
+ return ret;
+}
+
+/*
+ * Attach an extra callsite to an event. Only uprobe events support
+ * callsites; every other instrumentation type yields -EINVAL.
+ */
+int lttng_event_add_callsite(struct lttng_event *event,
+ struct lttng_kernel_event_callsite __user *callsite)
+{
+ if (event->instrumentation != LTTNG_KERNEL_UPROBE)
+  return -EINVAL;
+ return lttng_uprobes_add_callsite(event, callsite);
+}
+
+/* Attaching a context to an enabler is not implemented. */
+int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+ struct lttng_kernel_context *context_param)
+{
+ return -ENOSYS;
+}
+
+/*
+ * Free an enabler: release all attached filter bytecode nodes,
+ * destroy its context, and unlink it from the session's enabler list.
+ */
+static
+void lttng_enabler_destroy(struct lttng_enabler *enabler)
+{
+ struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
+
+ /* Destroy filter bytecode */
+ list_for_each_entry_safe(filter_node, tmp_filter_node,
+ &enabler->filter_bytecode_head, node) {
+ kfree(filter_node);
+ }
+
+ /* Destroy contexts */
+ lttng_destroy_context(enabler->ctx);
+
+ list_del(&enabler->node);
+ kfree(enabler);
+}
+
+/*
+ * lttng_session_sync_enablers should be called just before starting a
+ * session.
+ * Should be called with sessions mutex held.
+ * Creates missing events for every enabler, then recomputes each
+ * event's enabled state, tracepoint registration, and filter state.
+ * NOTE(review): the return value of lttng_enabler_ref_events (-ENOMEM
+ * possible) is ignored here — confirm this is intentional.
+ */
+static
+void lttng_session_sync_enablers(struct lttng_session *session)
+{
+ struct lttng_enabler *enabler;
+ struct lttng_event *event;
+
+ list_for_each_entry(enabler, &session->enablers_head, node)
+ lttng_enabler_ref_events(enabler);
+ /*
+ * For each event, if at least one of its enablers is enabled,
+ * and its channel and session transient states are enabled, we
+ * enable the event, else we disable it.
+ */
+ list_for_each_entry(event, &session->events, list) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_bytecode = 0;
+
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ /* Enable events */
+ list_for_each_entry(enabler_ref,
+ &event->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+ break;
+ default:
+ /* Not handled with lazy sync. */
+ continue;
+ }
+ /*
+ * Enabled state is based on union of enablers, with
+ * intesection of session and channel transient enable
+ * states.
+ */
+ enabled = enabled && session->tstate && event->chan->tstate;
+
+ WRITE_ONCE(event->enabled, enabled);
+ /*
+ * Sync tracepoint registration with event enabled
+ * state.
+ */
+ if (enabled) {
+ register_event(event);
+ } else {
+ _lttng_event_unregister(event);
+ }
+
+ /* Check if has enablers without bytecode enabled */
+ list_for_each_entry(enabler_ref,
+ &event->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_bytecode = 1;
+ break;
+ }
+ }
+ event->has_enablers_without_bytecode =
+ has_enablers_without_bytecode;
+
+ /* Enable filters */
+ list_for_each_entry(runtime,
+ &event->bytecode_runtime_head, node)
+ lttng_filter_sync_state(runtime);
+ }
+}
+
+/*
+ * Apply enablers to session events, adding events to session if need
+ * be. It is required after each modification applied to an active
+ * session, and right before session "start".
+ * "lazy" sync means we only sync if required.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+{
+ /* Nothing to do while the session is inactive. */
+ if (session->active)
+  lttng_session_sync_enablers(session);
+}
+
+/*
+ * Serialize at most one packet worth of metadata into a metadata
+ * channel.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations such as looking for remaining space left in
+ * packet and write, since mutual exclusion protects us from concurrent writes.
+ * Mutual exclusion on the metadata cache allow us to read the cache content
+ * without racing against reallocation of the cache by updates.
+ * Returns the number of bytes written in the channel, 0 if no data
+ * was written and a negative value on error.
+ */
+int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
+ struct channel *chan, bool *coherent)
+{
+ struct lib_ring_buffer_ctx ctx;