/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST trace/channel/event context management.
 */
#define _LGPL_SOURCE
#include <stddef.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>

#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <ust-context-provider.h>
#include <lttng/urcu/pointer.h>
#include <lttng/urcu/urcu-ust.h>
#include <usterr-signal-safe.h>
#include <ust-helper.h>

#include "tracepoint-internal.h"

#include "context-internal.h"
/*
 * The filter implementation requires that two consecutive "get" for the
 * same context performed by the same thread return the same result.
 */
int lttng_find_context(struct lttng_ctx *ctx, const char *name)
{
	unsigned int i;
	const char *subname;

	if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
		subname = name + strlen("$ctx.");
	} else {
		subname = name;
	}
	for (i = 0; i < ctx->nr_fields; i++) {
		/* Skip allocated (but non-initialized) contexts */
		if (!ctx->fields[i].event_field.name)
			continue;
		if (!strcmp(ctx->fields[i].event_field.name, subname))
			return 1;
	}
	return 0;
}
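
/*
 * Illustrative sketch (not part of the upstream file): both the plain
 * context name and its "$ctx."-prefixed form, as used in filter
 * expressions, resolve to the same field. The "vtid" field name below
 * is only an example of a context field that may have been added.
 */
#if 0	/* example only, not compiled into the library */
static
void example_find_context(struct lttng_ctx *ctx)
{
	/* Both lookups match a field whose event_field.name is "vtid". */
	assert(lttng_find_context(ctx, "vtid")
		== lttng_find_context(ctx, "$ctx.vtid"));
}
#endif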
int lttng_get_context_index(struct lttng_ctx *ctx, const char *name)
{
	unsigned int i;
	const char *subname;

	if (!ctx)
		return -1;
	if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
		subname = name + strlen("$ctx.");
	} else {
		subname = name;
	}
	for (i = 0; i < ctx->nr_fields; i++) {
		/* Skip allocated (but non-initialized) contexts */
		if (!ctx->fields[i].event_field.name)
			continue;
		if (!strcmp(ctx->fields[i].event_field.name, subname))
			return i;
	}
	return -1;
}
static int lttng_find_context_provider(struct lttng_ctx *ctx, const char *name)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_fields; i++) {
		/* Skip allocated (but non-initialized) contexts */
		if (!ctx->fields[i].event_field.name)
			continue;
		if (!strncmp(ctx->fields[i].event_field.name, name,
				strlen(name)))
			return 1;
	}
	return 0;
}
/*
 * Note: as we append context information, the pointer location may change.
 */
struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
{
	struct lttng_ctx_field *field;
	struct lttng_ctx *ctx;

	if (!*ctx_p) {
		*ctx_p = zmalloc(sizeof(struct lttng_ctx));
		if (!*ctx_p)
			return NULL;
		(*ctx_p)->largest_align = 1;
	}
	ctx = *ctx_p;
	if (ctx->nr_fields + 1 > ctx->allocated_fields) {
		struct lttng_ctx_field *new_fields;

		ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
		new_fields = zmalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field));
		if (!new_fields)
			return NULL;
		if (ctx->fields)
			memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
		free(ctx->fields);
		ctx->fields = new_fields;
	}
	field = &ctx->fields[ctx->nr_fields];
	ctx->nr_fields++;
	return field;
}
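
/*
 * Illustrative sketch (not part of the upstream file) of how a context
 * provider typically uses lttng_append_context(): append a field, back
 * out on duplicates, fill in the field description and callbacks, then
 * refresh the pre-computed alignment. The "example" name and the
 * example_* callbacks are hypothetical.
 */
#if 0	/* example only, not compiled into the library */
static
int example_add_context_field(struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;

	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	/* The fresh field has a NULL name, so it is skipped by the lookup. */
	if (lttng_find_context(*ctx, "example")) {
		lttng_remove_context_field(ctx, field);
		return -EEXIST;
	}
	field->event_field.name = "example";
	/* ... describe field->event_field.type here ... */
	field->get_size = example_get_size;
	field->record = example_record;
	field->get_value = example_get_value;
	lttng_context_update(*ctx);
	return 0;
}
#endif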
int lttng_context_add_rcu(struct lttng_ctx **ctx_p,
		const struct lttng_ctx_field *f)
{
	struct lttng_ctx *old_ctx = *ctx_p, *new_ctx = NULL;
	struct lttng_ctx_field *new_fields = NULL;
	struct lttng_ctx_field *nf;

	if (old_ctx) {
		new_ctx = zmalloc(sizeof(struct lttng_ctx));
		if (!new_ctx)
			return -ENOMEM;
		*new_ctx = *old_ctx;
		new_fields = zmalloc(new_ctx->allocated_fields
				* sizeof(struct lttng_ctx_field));
		if (!new_fields) {
			free(new_ctx);
			return -ENOMEM;
		}
		/* Copy elements */
		memcpy(new_fields, old_ctx->fields,
				sizeof(*old_ctx->fields) * old_ctx->nr_fields);
		new_ctx->fields = new_fields;
	}
	nf = lttng_append_context(&new_ctx);
	if (!nf) {
		free(new_fields);
		free(new_ctx);
		return -ENOMEM;
	}
	*nf = *f;
	lttng_context_update(new_ctx);
	lttng_ust_rcu_assign_pointer(*ctx_p, new_ctx);
	lttng_ust_urcu_synchronize_rcu();
	if (old_ctx) {
		free(old_ctx->fields);
		free(old_ctx);
	}
	return 0;
}
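
/*
 * Illustrative sketch (not part of the upstream file): the update above
 * publishes the new lttng_ctx with lttng_ust_rcu_assign_pointer() and
 * waits for a grace period before freeing the old fields, so readers
 * are expected to dereference the context pointer once inside an RCU
 * read-side critical section and stick to that snapshot. The
 * lttng_ust_urcu_read_lock()/unlock() and lttng_ust_rcu_dereference()
 * primitives named here are assumptions about the matching read side.
 */
#if 0	/* example only, not compiled into the library */
static
void example_reader(struct lttng_ctx **ctx_p)
{
	struct lttng_ctx *ctx;
	unsigned int i;

	lttng_ust_urcu_read_lock();
	ctx = lttng_ust_rcu_dereference(*ctx_p);
	if (ctx) {
		for (i = 0; i < ctx->nr_fields; i++) {
			/* Use ctx->fields[i] consistently within the section. */
		}
	}
	lttng_ust_urcu_read_unlock();
}
#endif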
/*
 * lttng_context_update() should be called at least once between context
 * modification and trace start.
 */
void lttng_context_update(struct lttng_ctx *ctx)
{
	int i;
	size_t largest_align = 8;	/* in bits */

	for (i = 0; i < ctx->nr_fields; i++) {
		struct lttng_type *type;
		size_t field_align = 8;

		type = &ctx->fields[i].event_field.type;
		switch (type->atype) {
		case atype_integer:
			field_align = type->u.integer.alignment;
			break;
		case atype_array:	/* legacy */
		{
			struct lttng_basic_type *btype;

			btype = &type->u.legacy.array.elem_type;
			switch (btype->atype) {
			case atype_integer:
				field_align = btype->u.basic.integer.alignment;
				break;
			case atype_string:
				break;

			case atype_array:
			case atype_array_nestable:
			case atype_sequence:
			case atype_sequence_nestable:
			default:
				WARN_ON_ONCE(1);
				break;
			}
			break;
		}
		case atype_array_nestable:
		{
			const struct lttng_type *nested_type;

			nested_type = type->u.array_nestable.elem_type;
			switch (nested_type->atype) {
			case atype_integer:
				field_align = nested_type->u.integer.alignment;
				break;
			case atype_string:
				break;

			case atype_array:
			case atype_array_nestable:
			case atype_sequence:
			case atype_sequence_nestable:
			default:
				WARN_ON_ONCE(1);
				break;
			}
			field_align = max_t(size_t, field_align,
					type->u.array_nestable.alignment);
			break;
		}
		case atype_sequence:	/* legacy */
		{
			struct lttng_basic_type *btype;

			btype = &type->u.legacy.sequence.length_type;
			switch (btype->atype) {
			case atype_integer:
				field_align = btype->u.basic.integer.alignment;
				break;

			case atype_string:
			case atype_array:
			case atype_array_nestable:
			case atype_sequence:
			case atype_sequence_nestable:
			default:
				WARN_ON_ONCE(1);
				break;
			}

			btype = &type->u.legacy.sequence.elem_type;
			switch (btype->atype) {
			case atype_integer:
				field_align = max_t(size_t,
					field_align,
					btype->u.basic.integer.alignment);
				break;

			case atype_string:
				break;

			case atype_array:
			case atype_array_nestable:
			case atype_sequence:
			case atype_sequence_nestable:
			default:
				WARN_ON_ONCE(1);
				break;
			}
			break;
		}
		case atype_sequence_nestable:
		{
			const struct lttng_type *nested_type;

			nested_type = type->u.sequence_nestable.elem_type;
			switch (nested_type->atype) {
			case atype_integer:
				field_align = nested_type->u.integer.alignment;
				break;

			case atype_string:
				break;

			case atype_array:
			case atype_array_nestable:
			case atype_sequence:
			case atype_sequence_nestable:
			default:
				WARN_ON_ONCE(1);
				break;
			}
			field_align = max_t(size_t, field_align,
					type->u.sequence_nestable.alignment);
			break;
		}
		case atype_string:
			break;
		case atype_dynamic:
			break;
		case atype_enum:	/* legacy */
		case atype_enum_nestable:
		default:
			WARN_ON_ONCE(1);
			break;
		}
		largest_align = max_t(size_t, largest_align, field_align);
	}
	ctx->largest_align = largest_align >> 3;	/* bits to bytes */
}
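
/*
 * Worked example (not part of the upstream file): with context fields
 * requiring 8-bit and 32-bit alignment, the loop above keeps the
 * largest value in bits and the final shift converts it to bytes.
 */
#if 0	/* example only, not compiled into the library */
static
void example_largest_align(void)
{
	size_t largest_align = 8;				/* in bits */

	largest_align = max_t(size_t, largest_align, 8);	/* 8-bit field */
	largest_align = max_t(size_t, largest_align, 32);	/* 32-bit field */
	assert((largest_align >> 3) == 4);			/* stored in bytes */
}
#endif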
/*
 * Remove last context field.
 */
void lttng_remove_context_field(struct lttng_ctx **ctx_p,
		struct lttng_ctx_field *field)
{
	struct lttng_ctx *ctx;

	ctx = *ctx_p;
	ctx->nr_fields--;
	assert(&ctx->fields[ctx->nr_fields] == field);
	assert(field->field_name == NULL);
	memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
}
void lttng_destroy_context(struct lttng_ctx *ctx)
{
	int i;

	if (!ctx)
		return;
	for (i = 0; i < ctx->nr_fields; i++) {
		if (ctx->fields[i].destroy)
			ctx->fields[i].destroy(&ctx->fields[i]);
		free(ctx->fields[i].field_name);
	}
	free(ctx->fields);
	free(ctx);
}
/*
 * Can be safely performed concurrently with tracing using the struct
 * lttng_ctx. Using RCU update. Needs to match RCU read-side handling of
 * contexts.
 *
 * This does not allow adding, removing, or changing typing of the
 * contexts, since this needs to stay invariant for metadata. However,
 * it allows updating the handlers associated with all contexts matching
 * a provider (by name) while tracing is using it, in a way that ensures
 * a single RCU read-side critical section sees either all old, or all
 * new handlers.
 */
int lttng_ust_context_set_provider_rcu(struct lttng_ctx **_ctx,
		const char *name,
		size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
		void (*record)(struct lttng_ctx_field *field,
			struct lttng_ust_lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan),
		void (*get_value)(struct lttng_ctx_field *field,
			struct lttng_ctx_value *value))
{
	int i, ret;
	struct lttng_ctx *ctx = *_ctx, *new_ctx;
	struct lttng_ctx_field *new_fields;

	if (!ctx || !lttng_find_context_provider(ctx, name))
		return 0;
	/*
	 * We have at least one instance of context for the provider.
	 */
	new_ctx = zmalloc(sizeof(*new_ctx));
	if (!new_ctx)
		return -ENOMEM;
	*new_ctx = *ctx;
	new_fields = zmalloc(sizeof(*new_fields) * ctx->allocated_fields);
	if (!new_fields) {
		ret = -ENOMEM;
		goto field_error;
	}
	memcpy(new_fields, ctx->fields,
		sizeof(*new_fields) * ctx->allocated_fields);
	for (i = 0; i < ctx->nr_fields; i++) {
		if (strncmp(new_fields[i].event_field.name,
				name, strlen(name)) != 0)
			continue;
		new_fields[i].get_size = get_size;
		new_fields[i].record = record;
		new_fields[i].get_value = get_value;
	}
	new_ctx->fields = new_fields;
	lttng_ust_rcu_assign_pointer(*_ctx, new_ctx);
	lttng_ust_urcu_synchronize_rcu();
	free(ctx->fields);
	free(ctx);
	return 0;

field_error:
	free(new_ctx);
	return ret;
}
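
/*
 * Illustrative sketch (not part of the upstream file): a context
 * provider can swap the callbacks of every field it owns while tracing
 * is active by passing its name prefix. The provider name and the
 * example_* callbacks below are hypothetical.
 */
#if 0	/* example only, not compiled into the library */
static
int example_update_provider(struct lttng_ctx **ctx_p)
{
	return lttng_ust_context_set_provider_rcu(ctx_p, "$app.myprovider:",
			example_get_size, example_record, example_get_value);
}
#endif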
int lttng_context_init_all(struct lttng_ctx **ctx)
{
	int ret;

	ret = lttng_add_pthread_id_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_pthread_id_to_ctx");
		goto error;
	}
	ret = lttng_add_vtid_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_vtid_to_ctx");
		goto error;
	}
	ret = lttng_add_vpid_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_vpid_to_ctx");
		goto error;
	}
	ret = lttng_add_procname_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_procname_to_ctx");
		goto error;
	}
	ret = lttng_add_cpu_id_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_cpu_id_to_ctx");
		goto error;
	}
	ret = lttng_add_cgroup_ns_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_cgroup_ns_to_ctx");
		goto error;
	}
	ret = lttng_add_ipc_ns_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_ipc_ns_to_ctx");
		goto error;
	}
	ret = lttng_add_mnt_ns_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_mnt_ns_to_ctx");
		goto error;
	}
	ret = lttng_add_net_ns_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_net_ns_to_ctx");
		goto error;
	}
	ret = lttng_add_pid_ns_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_pid_ns_to_ctx");
		goto error;
	}
	ret = lttng_add_time_ns_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_time_ns_to_ctx");
		goto error;
	}
	ret = lttng_add_user_ns_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_user_ns_to_ctx");
		goto error;
	}
	ret = lttng_add_uts_ns_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_uts_ns_to_ctx");
		goto error;
	}
	ret = lttng_add_vuid_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_vuid_to_ctx");
		goto error;
	}
	ret = lttng_add_veuid_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_veuid_to_ctx");
		goto error;
	}
	ret = lttng_add_vsuid_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_vsuid_to_ctx");
		goto error;
	}
	ret = lttng_add_vgid_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_vgid_to_ctx");
		goto error;
	}
	ret = lttng_add_vegid_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_vegid_to_ctx");
		goto error;
	}
	ret = lttng_add_vsgid_to_ctx(ctx);
	if (ret) {
		WARN("Cannot add context lttng_add_vsgid_to_ctx");
		goto error;
	}
	lttng_context_update(*ctx);
	return 0;

error:
	lttng_destroy_context(*ctx);
	return ret;
}