Fix: in_x32_syscall was introduced in v4.7.0
[lttng-modules.git] / src / lttng-syscalls.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-syscalls.c
4 *
5 * LTTng syscall probes.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/compat.h>
13 #include <linux/err.h>
14 #include <linux/bitmap.h>
15 #include <linux/in.h>
16 #include <linux/in6.h>
17 #include <linux/seq_file.h>
18 #include <linux/stringify.h>
19 #include <linux/file.h>
20 #include <linux/anon_inodes.h>
21 #include <linux/fcntl.h>
22 #include <linux/mman.h>
23 #include <asm/ptrace.h>
24 #include <asm/syscall.h>
25
26 #include <lttng/bitfield.h>
27 #include <wrapper/tracepoint.h>
28 #include <wrapper/rcu.h>
29 #include <wrapper/syscall.h>
30 #include <wrapper/limits.h>
31 #include <lttng/events.h>
32 #include <lttng/events-internal.h>
33 #include <lttng/utils.h>
34 #include <lttng/kernel-version.h>
35
36 #include "lttng-syscalls.h"
37
38 #ifndef CONFIG_COMPAT
39 # ifndef is_compat_task
40 # define is_compat_task() (0)
41 # endif
42 #endif
43
44 /* in_compat_syscall appears in kernel 4.6. */
45 #ifndef in_compat_syscall
46 # define in_compat_syscall() is_compat_task()
47 #endif
48
49 /* in_x32_syscall appears in kernel 4.7. */
50 #if (LTTNG_LINUX_VERSION_CODE < LTTNG_KERNEL_VERSION(4,7,0))
51 # ifdef CONFIG_X86_X32_ABI
52 # define in_x32_syscall() is_x32_task()
53 # endif
54 #endif
55
56 enum sc_type {
57 SC_TYPE_ENTRY,
58 SC_TYPE_EXIT,
59 SC_TYPE_COMPAT_ENTRY,
60 SC_TYPE_COMPAT_EXIT,
61 };
62
63 #define SYSCALL_ENTRY_TOK syscall_entry_
64 #define COMPAT_SYSCALL_ENTRY_TOK compat_syscall_entry_
65 #define SYSCALL_EXIT_TOK syscall_exit_
66 #define COMPAT_SYSCALL_EXIT_TOK compat_syscall_exit_
67
68 #define SYSCALL_ENTRY_STR __stringify(SYSCALL_ENTRY_TOK)
69 #define COMPAT_SYSCALL_ENTRY_STR __stringify(COMPAT_SYSCALL_ENTRY_TOK)
70 #define SYSCALL_EXIT_STR __stringify(SYSCALL_EXIT_TOK)
71 #define COMPAT_SYSCALL_EXIT_STR __stringify(COMPAT_SYSCALL_EXIT_TOK)
72
73 void syscall_entry_event_probe(void *__data, struct pt_regs *regs, long id);
74 void syscall_exit_event_probe(void *__data, struct pt_regs *regs, long ret);
75
76 #ifdef IA32_NR_syscalls
77 #define NR_compat_syscalls IA32_NR_syscalls
78 #else
79 #define NR_compat_syscalls NR_syscalls
80 #endif
81
82 /*
83 * Create LTTng tracepoint probes.
84 */
85 #define LTTNG_PACKAGE_BUILD
86 #define CREATE_TRACE_POINTS
87 #define TP_MODULE_NOINIT
88 #define TRACE_INCLUDE_PATH instrumentation/syscalls/headers
89
90 #define PARAMS(args...) args
91
92 /* Handle unknown syscalls */
93 #undef TRACE_SYSTEM
94 #define TRACE_SYSTEM syscalls_unknown
95 #include <instrumentation/syscalls/headers/syscalls_unknown.h>
96 #undef TRACE_SYSTEM
97
98 #undef TP_PROBE_CB
99
100 extern const struct trace_syscall_table sc_table;
101 extern const struct trace_syscall_table compat_sc_table;
102
103 /* Syscall exit event tables. */
104 extern const struct trace_syscall_table sc_exit_table;
105 extern const struct trace_syscall_table compat_sc_exit_table;
106
107
108 #undef SC_EXIT
109
110 #undef CREATE_SYSCALL_TABLE
111
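/*
 * Per syscall-table filter state: one bitmap per ABI (native/compat) and
 * per direction (entry/exit), with reference counts so a syscall bit is
 * only cleared once the last event using it is disabled.
 */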
112 struct lttng_syscall_filter {
113 DECLARE_BITMAP(sc_entry, NR_syscalls);
114 DECLARE_BITMAP(sc_exit, NR_syscalls);
115 DECLARE_BITMAP(sc_compat_entry, NR_compat_syscalls);
116 DECLARE_BITMAP(sc_compat_exit, NR_compat_syscalls);
117
118 /*
119 * Reference counters keeping track of number of events enabled
120 * for each bit.
121 */
122 u32 sc_entry_refcount_map[NR_syscalls];
123 u32 sc_exit_refcount_map[NR_syscalls];
124 u32 sc_compat_entry_refcount_map[NR_compat_syscalls];
125 u32 sc_compat_exit_refcount_map[NR_compat_syscalls];
126 };
127
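/*
 * Dispatch a system call entry which has no dedicated probe to every
 * enabled "unknown syscall" event, using the compat or native unknown
 * event probe depending on the current task.
 */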
128 static void syscall_entry_event_unknown(struct hlist_head *unknown_action_list_head,
129 struct pt_regs *regs, long id)
130 {
131 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
132 struct lttng_kernel_event_common_private *event_priv;
133
134 lttng_syscall_get_arguments(current, regs, args);
135 lttng_hlist_for_each_entry_rcu(event_priv, unknown_action_list_head, u.syscall.node) {
136 if (unlikely(in_compat_syscall()))
137 __event_probe__compat_syscall_entry_unknown(event_priv->pub, id, args);
138 else
139 __event_probe__syscall_entry_unknown(event_priv->pub, id, args);
140 }
141 }
142
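/*
 * Demultiplex on the number of system call arguments and invoke the
 * generated probe of every event on this syscall's action list with the
 * arguments fetched from the registers.
 */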
143 static __always_inline
144 void syscall_entry_event_call_func(struct hlist_head *action_list,
145 void *func, unsigned int nrargs,
146 struct pt_regs *regs)
147 {
148 struct lttng_kernel_event_common_private *event_priv;
149
150 switch (nrargs) {
151 case 0:
152 {
153 void (*fptr)(void *__data) = func;
154
155 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
156 fptr(event_priv->pub);
157 break;
158 }
159 case 1:
160 {
161 void (*fptr)(void *__data, unsigned long arg0) = func;
162 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
163
164 lttng_syscall_get_arguments(current, regs, args);
165 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
166 fptr(event_priv->pub, args[0]);
167 break;
168 }
169 case 2:
170 {
171 void (*fptr)(void *__data,
172 unsigned long arg0,
173 unsigned long arg1) = func;
174 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
175
176 lttng_syscall_get_arguments(current, regs, args);
177 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
178 fptr(event_priv->pub, args[0], args[1]);
179 break;
180 }
181 case 3:
182 {
183 void (*fptr)(void *__data,
184 unsigned long arg0,
185 unsigned long arg1,
186 unsigned long arg2) = func;
187 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
188
189 lttng_syscall_get_arguments(current, regs, args);
190 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
191 fptr(event_priv->pub, args[0], args[1], args[2]);
192 break;
193 }
194 case 4:
195 {
196 void (*fptr)(void *__data,
197 unsigned long arg0,
198 unsigned long arg1,
199 unsigned long arg2,
200 unsigned long arg3) = func;
201 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
202
203 lttng_syscall_get_arguments(current, regs, args);
204 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
205 fptr(event_priv->pub, args[0], args[1], args[2], args[3]);
206 break;
207 }
208 case 5:
209 {
210 void (*fptr)(void *__data,
211 unsigned long arg0,
212 unsigned long arg1,
213 unsigned long arg2,
214 unsigned long arg3,
215 unsigned long arg4) = func;
216 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
217
218 lttng_syscall_get_arguments(current, regs, args);
219 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
220 fptr(event_priv->pub, args[0], args[1], args[2], args[3], args[4]);
221 break;
222 }
223 case 6:
224 {
225 void (*fptr)(void *__data,
226 unsigned long arg0,
227 unsigned long arg1,
228 unsigned long arg2,
229 unsigned long arg3,
230 unsigned long arg4,
231 unsigned long arg5) = func;
232 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
233
234 lttng_syscall_get_arguments(current, regs, args);
235 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
236 fptr(event_priv->pub, args[0], args[1], args[2],
237 args[3], args[4], args[5]);
238 break;
239 }
240 default:
241 break;
242 }
243 }
244
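/*
 * Probe registered on the "sys_enter" tracepoint. Selects the native or
 * compat syscall table, applies the per-syscall filter, and dispatches
 * either to the per-syscall action list or to the unknown-syscall events.
 */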
245 void syscall_entry_event_probe(void *__data, struct pt_regs *regs, long id)
246 {
247 struct lttng_kernel_syscall_table *syscall_table = __data;
248 struct hlist_head *action_list, *unknown_action_list;
249 const struct trace_syscall_entry *table, *entry;
250 size_t table_len;
251
252 #ifdef CONFIG_X86_X32_ABI
253 if (in_x32_syscall()) {
254 /* x32 system calls are not supported. */
255 return;
256 }
257 #endif
258 if (unlikely(in_compat_syscall())) {
259 struct lttng_syscall_filter *filter = syscall_table->sc_filter;
260
261 if (id < 0 || id >= NR_compat_syscalls
262 || (!READ_ONCE(syscall_table->syscall_all_entry) && !test_bit(id, filter->sc_compat_entry))) {
263 /* System call filtered out. */
264 return;
265 }
266 table = compat_sc_table.table;
267 table_len = compat_sc_table.len;
268 unknown_action_list = &syscall_table->compat_unknown_syscall_dispatch;
269 } else {
270 struct lttng_syscall_filter *filter = syscall_table->sc_filter;
271
272 if (id < 0 || id >= NR_syscalls
273 || (!READ_ONCE(syscall_table->syscall_all_entry) && !test_bit(id, filter->sc_entry))) {
274 /* System call filtered out. */
275 return;
276 }
277 table = sc_table.table;
278 table_len = sc_table.len;
279 unknown_action_list = &syscall_table->unknown_syscall_dispatch;
280 }
281 if (unlikely(id < 0 || id >= table_len)) {
282 syscall_entry_event_unknown(unknown_action_list, regs, id);
283 return;
284 }
285
286 entry = &table[id];
287 if (!entry->event_func) {
288 syscall_entry_event_unknown(unknown_action_list, regs, id);
289 return;
290 }
291
292 if (unlikely(in_compat_syscall())) {
293 action_list = &syscall_table->compat_syscall_dispatch[id];
294 } else {
295 action_list = &syscall_table->syscall_dispatch[id];
296 }
297 if (unlikely(hlist_empty(action_list)))
298 return;
299
300 syscall_entry_event_call_func(action_list, entry->event_func, entry->nrargs, regs);
301 }
302
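/*
 * Exit-side counterpart of syscall_entry_event_unknown(), additionally
 * passing the system call return value to the unknown event probes.
 */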
303 static void syscall_exit_event_unknown(struct hlist_head *unknown_action_list_head,
304 struct pt_regs *regs, long id, long ret)
305 {
306 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
307 struct lttng_kernel_event_common_private *event_priv;
308
309 lttng_syscall_get_arguments(current, regs, args);
310 lttng_hlist_for_each_entry_rcu(event_priv, unknown_action_list_head, u.syscall.node) {
311 if (unlikely(in_compat_syscall()))
312 __event_probe__compat_syscall_exit_unknown(event_priv->pub, id, ret,
313 args);
314 else
315 __event_probe__syscall_exit_unknown(event_priv->pub, id, ret, args);
316 }
317 }
318
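/*
 * Same demultiplexing as syscall_entry_event_call_func(), with the system
 * call return value passed before the arguments.
 */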
319 static __always_inline
320 void syscall_exit_event_call_func(struct hlist_head *action_list,
321 void *func, unsigned int nrargs,
322 struct pt_regs *regs, long ret)
323 {
324 struct lttng_kernel_event_common_private *event_priv;
325
326 switch (nrargs) {
327 case 0:
328 {
329 void (*fptr)(void *__data, long ret) = func;
330
331 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
332 fptr(event_priv->pub, ret);
333 break;
334 }
335 case 1:
336 {
337 void (*fptr)(void *__data,
338 long ret,
339 unsigned long arg0) = func;
340 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
341
342 lttng_syscall_get_arguments(current, regs, args);
343 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
344 fptr(event_priv->pub, ret, args[0]);
345 break;
346 }
347 case 2:
348 {
349 void (*fptr)(void *__data,
350 long ret,
351 unsigned long arg0,
352 unsigned long arg1) = func;
353 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
354
355 lttng_syscall_get_arguments(current, regs, args);
356 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
357 fptr(event_priv->pub, ret, args[0], args[1]);
358 break;
359 }
360 case 3:
361 {
362 void (*fptr)(void *__data,
363 long ret,
364 unsigned long arg0,
365 unsigned long arg1,
366 unsigned long arg2) = func;
367 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
368
369 lttng_syscall_get_arguments(current, regs, args);
370 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
371 fptr(event_priv->pub, ret, args[0], args[1], args[2]);
372 break;
373 }
374 case 4:
375 {
376 void (*fptr)(void *__data,
377 long ret,
378 unsigned long arg0,
379 unsigned long arg1,
380 unsigned long arg2,
381 unsigned long arg3) = func;
382 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
383
384 lttng_syscall_get_arguments(current, regs, args);
385 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
386 fptr(event_priv->pub, ret, args[0], args[1], args[2], args[3]);
387 break;
388 }
389 case 5:
390 {
391 void (*fptr)(void *__data,
392 long ret,
393 unsigned long arg0,
394 unsigned long arg1,
395 unsigned long arg2,
396 unsigned long arg3,
397 unsigned long arg4) = func;
398 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
399
400 lttng_syscall_get_arguments(current, regs, args);
401 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
402 fptr(event_priv->pub, ret, args[0], args[1], args[2], args[3], args[4]);
403 break;
404 }
405 case 6:
406 {
407 void (*fptr)(void *__data,
408 long ret,
409 unsigned long arg0,
410 unsigned long arg1,
411 unsigned long arg2,
412 unsigned long arg3,
413 unsigned long arg4,
414 unsigned long arg5) = func;
415 unsigned long args[LTTNG_SYSCALL_NR_ARGS];
416
417 lttng_syscall_get_arguments(current, regs, args);
418 lttng_hlist_for_each_entry_rcu(event_priv, action_list, u.syscall.node)
419 fptr(event_priv->pub, ret, args[0], args[1], args[2],
420 args[3], args[4], args[5]);
421 break;
422 }
423 default:
424 break;
425 }
426 }
427
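/*
 * Probe registered on the "sys_exit" tracepoint. The syscall id is read
 * back from the registers; filtering and dispatch then mirror the entry
 * probe.
 */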
428 void syscall_exit_event_probe(void *__data, struct pt_regs *regs, long ret)
429 {
430 struct lttng_kernel_syscall_table *syscall_table = __data;
431 struct hlist_head *action_list, *unknown_action_list;
432 const struct trace_syscall_entry *table, *entry;
433 size_t table_len;
434 long id;
435
436 #ifdef CONFIG_X86_X32_ABI
437 if (in_x32_syscall()) {
438 /* x32 system calls are not supported. */
439 return;
440 }
441 #endif
442 id = syscall_get_nr(current, regs);
443
444 if (unlikely(in_compat_syscall())) {
445 struct lttng_syscall_filter *filter = syscall_table->sc_filter;
446
447 if (id < 0 || id >= NR_compat_syscalls
448 || (!READ_ONCE(syscall_table->syscall_all_exit) && !test_bit(id, filter->sc_compat_exit))) {
449 /* System call filtered out. */
450 return;
451 }
452 table = compat_sc_exit_table.table;
453 table_len = compat_sc_exit_table.len;
454 unknown_action_list = &syscall_table->compat_unknown_syscall_exit_dispatch;
455 } else {
456 struct lttng_syscall_filter *filter = syscall_table->sc_filter;
457
458 if (id < 0 || id >= NR_syscalls
459 || (!READ_ONCE(syscall_table->syscall_all_exit) && !test_bit(id, filter->sc_exit))) {
460 /* System call filtered out. */
461 return;
462 }
463 table = sc_exit_table.table;
464 table_len = sc_exit_table.len;
465 unknown_action_list = &syscall_table->unknown_syscall_exit_dispatch;
466 }
467 if (unlikely(id < 0 || id >= table_len)) {
468 syscall_exit_event_unknown(unknown_action_list, regs, id, ret);
469 return;
470 }
471
472 entry = &table[id];
473 if (!entry->event_func) {
474 syscall_exit_event_unknown(unknown_action_list, regs, id, ret);
475 return;
476 }
477
478 if (unlikely(in_compat_syscall())) {
479 action_list = &syscall_table->compat_syscall_exit_dispatch[id];
480 } else {
481 action_list = &syscall_table->syscall_exit_dispatch[id];
482 }
483 if (unlikely(hlist_empty(action_list)))
484 return;
485
486 syscall_exit_event_call_func(action_list, entry->event_func, entry->nrargs,
487 regs, ret);
488 }
489
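/*
 * The syscall table lives in the channel for recorder events and in the
 * event notifier group for notifier events; the two helpers below return
 * NULL for any other type.
 */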
490 static
491 struct lttng_kernel_syscall_table *get_syscall_table_from_enabler(struct lttng_event_enabler_common *event_enabler)
492 {
493 switch (event_enabler->enabler_type) {
494 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
495 {
496 struct lttng_event_recorder_enabler *event_recorder_enabler =
497 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
498 return &event_recorder_enabler->chan->priv->parent.syscall_table;
499 }
500 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
501 {
502 struct lttng_event_notifier_enabler *event_notifier_enabler =
503 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
504 return &event_notifier_enabler->group->syscall_table;
505 }
506 default:
507 return NULL;
508 }
509 }
510
511 static
512 struct lttng_kernel_syscall_table *get_syscall_table_from_event(struct lttng_kernel_event_common *event)
513 {
514 switch (event->type) {
515 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
516 {
517 struct lttng_kernel_event_recorder *event_recorder =
518 container_of(event, struct lttng_kernel_event_recorder, parent);
519 return &event_recorder->chan->priv->parent.syscall_table;
520 }
521 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
522 {
523 struct lttng_kernel_event_notifier *event_notifier =
524 container_of(event, struct lttng_kernel_event_notifier, parent);
525 return &event_notifier->priv->group->syscall_table;
526 }
527 default:
528 return NULL;
529 }
530 }
531
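/*
 * Create one recorder or notifier event for the given syscall descriptor,
 * using a temporary enabler carrying the entry/exit and native/compat
 * attributes, and store the syscall id in the event private data.
 */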
532 static
533 void lttng_syscall_event_enabler_create_event(struct lttng_event_enabler_common *syscall_event_enabler,
534 const struct lttng_kernel_event_desc *desc, enum sc_type type, unsigned int syscall_nr)
535 {
536 struct lttng_kernel_event_common *event;
537
538 switch (syscall_event_enabler->enabler_type) {
539 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
540 {
541 struct lttng_event_recorder_enabler *syscall_event_recorder_enabler =
542 container_of(syscall_event_enabler, struct lttng_event_recorder_enabler, parent);
543 struct lttng_event_recorder_enabler *event_recorder_enabler;
544 struct lttng_kernel_abi_event ev;
545
546 /* We need to create an event for this syscall/enabler. */
547 memset(&ev, 0, sizeof(ev));
548 switch (type) {
549 case SC_TYPE_ENTRY:
550 ev.u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRY;
551 ev.u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE;
552 break;
553 case SC_TYPE_EXIT:
554 ev.u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_EXIT;
555 ev.u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE;
556 break;
557 case SC_TYPE_COMPAT_ENTRY:
558 ev.u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRY;
559 ev.u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT;
560 break;
561 case SC_TYPE_COMPAT_EXIT:
562 ev.u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_EXIT;
563 ev.u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT;
564 break;
565 }
566 strncpy(ev.name, desc->event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1);
567 ev.name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
568 ev.instrumentation = LTTNG_KERNEL_ABI_SYSCALL;
569 event_recorder_enabler = lttng_event_recorder_enabler_create(LTTNG_ENABLER_FORMAT_NAME, &ev,
570 syscall_event_recorder_enabler->chan);
571 WARN_ON_ONCE(!event_recorder_enabler);
572 if (!event_recorder_enabler)
573 return;
574 event = _lttng_kernel_event_create(&event_recorder_enabler->parent, desc);
575 WARN_ON_ONCE(IS_ERR(event));
576 lttng_event_enabler_destroy(&event_recorder_enabler->parent);
577 if (IS_ERR(event)) {
578 printk(KERN_INFO "Unable to create event recorder %s\n", desc->event_name);
579 return;
580 }
581 event->priv->u.syscall.syscall_id = syscall_nr;
582 break;
583 }
584 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
585 {
586 struct lttng_event_notifier_enabler *syscall_event_notifier_enabler =
587 container_of(syscall_event_enabler, struct lttng_event_notifier_enabler, parent);
588 struct lttng_event_notifier_enabler *event_notifier_enabler;
589 struct lttng_kernel_abi_event_notifier event_notifier_param;
590 uint64_t user_token = syscall_event_enabler->user_token;
591 uint64_t error_counter_index = syscall_event_notifier_enabler->error_counter_index;
592
593 memset(&event_notifier_param, 0, sizeof(event_notifier_param));
594 switch (type) {
595 case SC_TYPE_ENTRY:
596 event_notifier_param.event.u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRY;
597 event_notifier_param.event.u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE;
598 break;
599 case SC_TYPE_EXIT:
600 event_notifier_param.event.u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_EXIT;
601 event_notifier_param.event.u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE;
602 break;
603 case SC_TYPE_COMPAT_ENTRY:
604 event_notifier_param.event.u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_ENTRY;
605 event_notifier_param.event.u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT;
606 break;
607 case SC_TYPE_COMPAT_EXIT:
608 event_notifier_param.event.u.syscall.entryexit = LTTNG_KERNEL_ABI_SYSCALL_EXIT;
609 event_notifier_param.event.u.syscall.abi = LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT;
610 break;
611 }
612 strncat(event_notifier_param.event.name, desc->event_name,
613 LTTNG_KERNEL_ABI_SYM_NAME_LEN - strlen(event_notifier_param.event.name) - 1);
614 event_notifier_param.event.name[LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1] = '\0';
615 event_notifier_param.event.instrumentation = LTTNG_KERNEL_ABI_SYSCALL;
616 event_notifier_param.event.token = user_token;
617 event_notifier_param.error_counter_index = error_counter_index;
618
619 event_notifier_enabler = lttng_event_notifier_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
620 &event_notifier_param, syscall_event_notifier_enabler->group);
621 WARN_ON_ONCE(!event_notifier_enabler);
if (!event_notifier_enabler)
return;
622 event = _lttng_kernel_event_create(&event_notifier_enabler->parent, desc);
623 WARN_ON_ONCE(IS_ERR(event));
624 lttng_event_enabler_destroy(&event_notifier_enabler->parent);
625 if (IS_ERR(event)) {
626 printk(KERN_INFO "Unable to create event notifier %s\n", desc->event_name);
627 return;
628 }
629 event->priv->u.syscall.syscall_id = syscall_nr;
630 break;
631 }
632 default:
633 break;
634 }
635 }
636
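/*
 * Walk a syscall table and create an event for every system call whose
 * descriptor matches the enabler and which has not been created yet.
 */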
637 static
638 void lttng_syscall_event_enabler_create_matching_syscall_table_events(struct lttng_event_enabler_common *syscall_event_enabler_common,
639 const struct trace_syscall_entry *table, size_t table_len, enum sc_type type)
640 {
641 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(syscall_event_enabler_common);
642 const struct lttng_kernel_event_desc *desc;
643 unsigned int i;
644
645 #ifndef CONFIG_COMPAT
646 if (type == SC_TYPE_COMPAT_ENTRY || type == SC_TYPE_COMPAT_EXIT)
647 return;
648 #endif
649 /* Iterate over all syscalls and create the events that match this enabler. */
650 for (i = 0; i < table_len; i++) {
651 struct lttng_kernel_event_common_private *event_priv;
652 struct hlist_head *head;
653 bool found = false;
654
655 desc = table[i].desc;
656 if (!desc) {
657 /* Unknown syscall */
658 continue;
659 }
660
661 if (!lttng_desc_match_enabler(desc, syscall_event_enabler_common))
662 continue;
663
664 /*
665 * Check if already created.
666 */
667 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
668 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
669 if (lttng_event_enabler_desc_match_event(syscall_event_enabler_common, desc, event_priv->pub)) {
670 found = true;
671 break;
672 }
673 }
674 if (found)
675 continue;
676
677 lttng_syscall_event_enabler_create_event(syscall_event_enabler_common, desc, type, i);
678 }
679 }
680
681 static
682 bool lttng_syscall_event_enabler_is_wildcard_all(struct lttng_event_enabler_common *event_enabler)
683 {
684 if (event_enabler->event_param.instrumentation != LTTNG_KERNEL_ABI_SYSCALL)
685 return false;
686 if (event_enabler->event_param.u.syscall.abi != LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL)
687 return false;
688 if (event_enabler->event_param.u.syscall.match != LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME)
689 return false;
690 if (strcmp(event_enabler->event_param.name, "*"))
691 return false;
692 return true;
693 }
694
695 static
696 void create_unknown_syscall_event(struct lttng_event_enabler_common *event_enabler, enum sc_type type)
697 {
698 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
699 struct lttng_kernel_event_common_private *event_priv;
700 const struct lttng_kernel_event_desc *desc;
701 bool found = false;
702 struct hlist_head *head;
703
704 #ifndef CONFIG_COMPAT
705 if (type == SC_TYPE_COMPAT_ENTRY || type == SC_TYPE_COMPAT_EXIT)
706 return;
707 #endif
708 /*
709 * Considering that system calls can currently only be enabled on a
710 * per-name basis (or by a name wildcard), unknown syscall events are
711 * only used when matching *all* system calls, because that is the only
712 * case which can be associated with an unknown system call.
713 *
714 * When enabling system calls on a per-syscall-number basis becomes
715 * supported, this will need to be revisited.
716 */
717 if (!lttng_syscall_event_enabler_is_wildcard_all(event_enabler))
718 return;
719
720 switch (type) {
721 case SC_TYPE_ENTRY:
722 desc = &__event_desc___syscall_entry_unknown;
723 break;
724 case SC_TYPE_EXIT:
725 desc = &__event_desc___syscall_exit_unknown;
726 break;
727 case SC_TYPE_COMPAT_ENTRY:
728 desc = &__event_desc___compat_syscall_entry_unknown;
729 break;
730 case SC_TYPE_COMPAT_EXIT:
731 desc = &__event_desc___compat_syscall_exit_unknown;
732 break;
733 default:
734 WARN_ON_ONCE(1);
return;
735 }
736
737 /*
738 * Check if already created.
739 */
740 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
741 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
742 if (lttng_event_enabler_desc_match_event(event_enabler, desc, event_priv->pub)) {
743 found = true;
744 break;
745 }
746 }
747 if (!found)
748 lttng_syscall_event_enabler_create_event(event_enabler, desc, type, -1U);
749 }
750
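/*
 * Create the matching events for the entry and/or exit tables (native and
 * compat), as requested by the enabler, plus the unknown syscall events.
 */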
751 static
752 void lttng_syscall_event_enabler_create_matching_events(struct lttng_event_enabler_common *event_enabler)
753 {
754 enum lttng_kernel_abi_syscall_entryexit entryexit = event_enabler->event_param.u.syscall.entryexit;
755
756 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT) {
757 lttng_syscall_event_enabler_create_matching_syscall_table_events(event_enabler,
758 sc_table.table, sc_table.len, SC_TYPE_ENTRY);
759 lttng_syscall_event_enabler_create_matching_syscall_table_events(event_enabler,
760 compat_sc_table.table, compat_sc_table.len, SC_TYPE_COMPAT_ENTRY);
761 create_unknown_syscall_event(event_enabler, SC_TYPE_ENTRY);
762 create_unknown_syscall_event(event_enabler, SC_TYPE_COMPAT_ENTRY);
763 }
764
765 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT) {
766 lttng_syscall_event_enabler_create_matching_syscall_table_events(event_enabler,
767 sc_exit_table.table, sc_exit_table.len, SC_TYPE_EXIT);
768 lttng_syscall_event_enabler_create_matching_syscall_table_events(event_enabler,
769 compat_sc_exit_table.table, compat_sc_exit_table.len, SC_TYPE_COMPAT_EXIT);
770 create_unknown_syscall_event(event_enabler, SC_TYPE_EXIT);
771 create_unknown_syscall_event(event_enabler, SC_TYPE_COMPAT_EXIT);
772 }
773 }
774
775 /*
776 * Allocate the syscall dispatch tables and filter if needed, register
777 * the sys_enter/sys_exit tracepoint probes, and create the matching
 * events. Should be called with the sessions lock held.
 */
778 int lttng_event_enabler_create_syscall_events_if_missing(struct lttng_event_enabler_common *syscall_event_enabler)
779 {
780 struct lttng_kernel_syscall_table *syscall_table = get_syscall_table_from_enabler(syscall_event_enabler);
781 int ret;
782
783 if (!syscall_table->syscall_dispatch) {
784 /* create table mapping syscall entry ids to event lists */
785 syscall_table->syscall_dispatch = kzalloc(sizeof(struct hlist_head) * sc_table.len, GFP_KERNEL);
786 if (!syscall_table->syscall_dispatch)
787 return -ENOMEM;
788 }
789 if (!syscall_table->syscall_exit_dispatch) {
790 /* create table mapping syscall exit ids to event lists */
791 syscall_table->syscall_exit_dispatch = kzalloc(sizeof(struct hlist_head) * sc_exit_table.len, GFP_KERNEL);
792 if (!syscall_table->syscall_exit_dispatch)
793 return -ENOMEM;
794 }
795
796 #ifdef CONFIG_COMPAT
797 if (!syscall_table->compat_syscall_dispatch) {
798 /* create table mapping compat syscall entry ids to event lists */
799 syscall_table->compat_syscall_dispatch = kzalloc(sizeof(struct hlist_head) * compat_sc_table.len, GFP_KERNEL);
800 if (!syscall_table->compat_syscall_dispatch)
801 return -ENOMEM;
802 }
803
804 if (!syscall_table->compat_syscall_exit_dispatch) {
805 /* create table mapping compat syscall exit ids to event lists */
806 syscall_table->compat_syscall_exit_dispatch = kzalloc(sizeof(struct hlist_head) * compat_sc_exit_table.len, GFP_KERNEL);
807 if (!syscall_table->compat_syscall_exit_dispatch)
808 return -ENOMEM;
809 }
810 #endif
811 if (!syscall_table->sc_filter) {
812 syscall_table->sc_filter = kzalloc(sizeof(struct lttng_syscall_filter),
813 GFP_KERNEL);
814 if (!syscall_table->sc_filter)
815 return -ENOMEM;
816 }
817
818 if (!syscall_table->sys_enter_registered) {
819 ret = lttng_tracepoint_probe_register("sys_enter",
820 (void *) syscall_entry_event_probe, syscall_table);
821 if (ret)
822 return ret;
823 syscall_table->sys_enter_registered = 1;
824 }
825 if (!syscall_table->sys_exit_registered) {
826 ret = lttng_tracepoint_probe_register("sys_exit",
827 (void *) syscall_exit_event_probe, syscall_table);
828 if (ret) {
829 WARN_ON_ONCE(lttng_tracepoint_probe_unregister("sys_enter",
830 (void *) syscall_entry_event_probe, syscall_table));
831 return ret;
832 }
833 syscall_table->sys_exit_registered = 1;
834 }
835
836 lttng_syscall_event_enabler_create_matching_events(syscall_event_enabler);
837
838 return 0;
839 }
840
841 int lttng_syscalls_unregister_syscall_table(struct lttng_kernel_syscall_table *syscall_table)
842 {
843 int ret;
844
845 if (!syscall_table->syscall_dispatch)
846 return 0;
847 if (syscall_table->sys_enter_registered) {
848 ret = lttng_tracepoint_probe_unregister("sys_enter",
849 (void *) syscall_entry_event_probe, syscall_table);
850 if (ret)
851 return ret;
852 syscall_table->sys_enter_registered = 0;
853 }
854 if (syscall_table->sys_exit_registered) {
855 ret = lttng_tracepoint_probe_unregister("sys_exit",
856 (void *) syscall_exit_event_probe, syscall_table);
857 if (ret)
858 return ret;
859 syscall_table->sys_exit_registered = 0;
860 }
861 return 0;
862 }
863
864 int lttng_syscalls_destroy_syscall_table(struct lttng_kernel_syscall_table *syscall_table)
865 {
866 kfree(syscall_table->syscall_dispatch);
867 kfree(syscall_table->syscall_exit_dispatch);
868 #ifdef CONFIG_COMPAT
869 kfree(syscall_table->compat_syscall_dispatch);
870 kfree(syscall_table->compat_syscall_exit_dispatch);
871 #endif
872 kfree(syscall_table->sc_filter);
873 return 0;
874 }
875
876 static
877 uint32_t get_sc_tables_len(void)
878 {
879 return sc_table.len + compat_sc_table.len;
880 }
881
882 static
883 const char *get_syscall_name(const char *desc_name,
884 enum lttng_syscall_abi abi,
885 enum lttng_syscall_entryexit entryexit)
886 {
887 size_t prefix_len = 0;
888
889
890 switch (entryexit) {
891 case LTTNG_SYSCALL_ENTRY:
892 switch (abi) {
893 case LTTNG_SYSCALL_ABI_NATIVE:
894 prefix_len = strlen(SYSCALL_ENTRY_STR);
895 break;
896 case LTTNG_SYSCALL_ABI_COMPAT:
897 prefix_len = strlen(COMPAT_SYSCALL_ENTRY_STR);
898 break;
899 }
900 break;
901 case LTTNG_SYSCALL_EXIT:
902 switch (abi) {
903 case LTTNG_SYSCALL_ABI_NATIVE:
904 prefix_len = strlen(SYSCALL_EXIT_STR);
905 break;
906 case LTTNG_SYSCALL_ABI_COMPAT:
907 prefix_len = strlen(COMPAT_SYSCALL_EXIT_STR);
908 break;
909 }
910 break;
911 }
912 WARN_ON_ONCE(prefix_len == 0);
913 return desc_name + prefix_len;
914 }
915
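/*
 * Take a reference on a syscall in the relevant filter bitmap, setting
 * the bit on the 0 -> 1 refcount transition.
 */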
916 static
917 int lttng_syscall_filter_enable(
918 struct lttng_syscall_filter *filter,
919 const char *desc_name, enum lttng_syscall_abi abi,
920 enum lttng_syscall_entryexit entryexit,
921 unsigned int syscall_id)
922 {
923 const char *syscall_name;
924 unsigned long *bitmap;
925 u32 *refcount_map;
926
927 syscall_name = get_syscall_name(desc_name, abi, entryexit);
928
929 switch (entryexit) {
930 case LTTNG_SYSCALL_ENTRY:
931 switch (abi) {
932 case LTTNG_SYSCALL_ABI_NATIVE:
933 bitmap = filter->sc_entry;
934 refcount_map = filter->sc_entry_refcount_map;
935 break;
936 case LTTNG_SYSCALL_ABI_COMPAT:
937 bitmap = filter->sc_compat_entry;
938 refcount_map = filter->sc_compat_entry_refcount_map;
939 break;
940 default:
941 return -EINVAL;
942 }
943 break;
944 case LTTNG_SYSCALL_EXIT:
945 switch (abi) {
946 case LTTNG_SYSCALL_ABI_NATIVE:
947 bitmap = filter->sc_exit;
948 refcount_map = filter->sc_exit_refcount_map;
949 break;
950 case LTTNG_SYSCALL_ABI_COMPAT:
951 bitmap = filter->sc_compat_exit;
952 refcount_map = filter->sc_compat_exit_refcount_map;
953 break;
954 default:
955 return -EINVAL;
956 }
957 break;
958 default:
959 return -EINVAL;
960 }
961 if (refcount_map[syscall_id] == U32_MAX)
962 return -EOVERFLOW;
963 if (refcount_map[syscall_id]++ == 0)
964 bitmap_set(bitmap, syscall_id, 1);
965 return 0;
966 }
967
968 int lttng_syscall_filter_enable_event(struct lttng_kernel_event_common *event)
969 {
970 struct lttng_kernel_syscall_table *syscall_table = get_syscall_table_from_event(event);
971 unsigned int syscall_id = event->priv->u.syscall.syscall_id;
972 struct hlist_head *dispatch_list;
973 int ret = 0;
974
975 WARN_ON_ONCE(event->priv->instrumentation != LTTNG_KERNEL_ABI_SYSCALL);
976
977 /* Unknown syscall */
978 if (syscall_id == -1U) {
979 switch (event->priv->u.syscall.entryexit) {
980 case LTTNG_SYSCALL_ENTRY:
981 switch (event->priv->u.syscall.abi) {
982 case LTTNG_SYSCALL_ABI_NATIVE:
983 dispatch_list = &syscall_table->unknown_syscall_dispatch;
984 break;
985 case LTTNG_SYSCALL_ABI_COMPAT:
986 dispatch_list = &syscall_table->compat_unknown_syscall_dispatch;
987 break;
988 default:
989 ret = -EINVAL;
990 goto end;
991 }
992 break;
993 case LTTNG_SYSCALL_EXIT:
994 switch (event->priv->u.syscall.abi) {
995 case LTTNG_SYSCALL_ABI_NATIVE:
996 dispatch_list = &syscall_table->unknown_syscall_exit_dispatch;
997 break;
998 case LTTNG_SYSCALL_ABI_COMPAT:
999 dispatch_list = &syscall_table->compat_unknown_syscall_exit_dispatch;
1000 break;
1001 default:
1002 ret = -EINVAL;
1003 goto end;
1004 }
1005 break;
1006 default:
1007 ret = -EINVAL;
1008 goto end;
1009 }
1010 } else {
1011 ret = lttng_syscall_filter_enable(syscall_table->sc_filter,
1012 event->priv->desc->event_name, event->priv->u.syscall.abi,
1013 event->priv->u.syscall.entryexit, syscall_id);
1014 if (ret)
1015 return ret;
1016
1017 switch (event->priv->u.syscall.entryexit) {
1018 case LTTNG_SYSCALL_ENTRY:
1019 switch (event->priv->u.syscall.abi) {
1020 case LTTNG_SYSCALL_ABI_NATIVE:
1021 dispatch_list = &syscall_table->syscall_dispatch[syscall_id];
1022 break;
1023 case LTTNG_SYSCALL_ABI_COMPAT:
1024 dispatch_list = &syscall_table->compat_syscall_dispatch[syscall_id];
1025 break;
1026 default:
1027 ret = -EINVAL;
1028 goto end;
1029 }
1030 break;
1031 case LTTNG_SYSCALL_EXIT:
1032 switch (event->priv->u.syscall.abi) {
1033 case LTTNG_SYSCALL_ABI_NATIVE:
1034 dispatch_list = &syscall_table->syscall_exit_dispatch[syscall_id];
1035 break;
1036 case LTTNG_SYSCALL_ABI_COMPAT:
1037 dispatch_list = &syscall_table->compat_syscall_exit_dispatch[syscall_id];
1038 break;
1039 default:
1040 ret = -EINVAL;
1041 goto end;
1042 }
1043 break;
1044 default:
1045 ret = -EINVAL;
1046 goto end;
1047 }
1048 }
1049
1050 hlist_add_head_rcu(&event->priv->u.syscall.node, dispatch_list);
1051 end:
1052 return ret;
1053 }
1054
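/*
 * Release a reference on a syscall in the relevant filter bitmap,
 * clearing the bit on the 1 -> 0 refcount transition.
 */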
1055 static
1056 int lttng_syscall_filter_disable(struct lttng_syscall_filter *filter,
1057 const char *desc_name, enum lttng_syscall_abi abi,
1058 enum lttng_syscall_entryexit entryexit,
1059 unsigned int syscall_id)
1060 {
1061 const char *syscall_name;
1062 unsigned long *bitmap;
1063 u32 *refcount_map;
1064
1065 syscall_name = get_syscall_name(desc_name, abi, entryexit);
1066
1067 switch (entryexit) {
1068 case LTTNG_SYSCALL_ENTRY:
1069 switch (abi) {
1070 case LTTNG_SYSCALL_ABI_NATIVE:
1071 bitmap = filter->sc_entry;
1072 refcount_map = filter->sc_entry_refcount_map;
1073 break;
1074 case LTTNG_SYSCALL_ABI_COMPAT:
1075 bitmap = filter->sc_compat_entry;
1076 refcount_map = filter->sc_compat_entry_refcount_map;
1077 break;
1078 default:
1079 return -EINVAL;
1080 }
1081 break;
1082 case LTTNG_SYSCALL_EXIT:
1083 switch (abi) {
1084 case LTTNG_SYSCALL_ABI_NATIVE:
1085 bitmap = filter->sc_exit;
1086 refcount_map = filter->sc_exit_refcount_map;
1087 break;
1088 case LTTNG_SYSCALL_ABI_COMPAT:
1089 bitmap = filter->sc_compat_exit;
1090 refcount_map = filter->sc_compat_exit_refcount_map;
1091 break;
1092 default:
1093 return -EINVAL;
1094 }
1095 break;
1096 default:
1097 return -EINVAL;
1098 }
1099 if (refcount_map[syscall_id] == 0)
1100 return -ENOENT;
1101 if (--refcount_map[syscall_id] == 0)
1102 bitmap_clear(bitmap, syscall_id, 1);
1103 return 0;
1104 }
1105
1106 int lttng_syscall_filter_disable_event(struct lttng_kernel_event_common *event)
1107 {
1108 struct lttng_kernel_syscall_table *syscall_table = get_syscall_table_from_event(event);
1109 unsigned int syscall_id = event->priv->u.syscall.syscall_id;
1110 int ret;
1111
1112 /* The unknown syscall event (id -1U) has no filter bit to update. */
1113 if (syscall_id != -1U) {
1114 ret = lttng_syscall_filter_disable(syscall_table->sc_filter,
1115 event->priv->desc->event_name, event->priv->u.syscall.abi,
1116 event->priv->u.syscall.entryexit, syscall_id);
1117 if (ret)
1118 return ret;
1119 }
1120 hlist_del_rcu(&event->priv->u.syscall.node);
1121 return 0;
1122 }
1123
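/*
 * Propagate the enabled state of a "match all" syscall enabler to the
 * syscall_all_entry / syscall_all_exit flags read by the probes.
 */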
1124 void lttng_syscall_table_set_wildcard_all(struct lttng_event_enabler_common *event_enabler)
1125 {
1126 struct lttng_kernel_syscall_table *syscall_table = get_syscall_table_from_enabler(event_enabler);
1127 enum lttng_kernel_abi_syscall_entryexit entryexit;
1128 int enabled = event_enabler->enabled;
1129
1130 if (!lttng_syscall_event_enabler_is_wildcard_all(event_enabler))
1131 return;
1132 entryexit = event_enabler->event_param.u.syscall.entryexit;
1133 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
1134 WRITE_ONCE(syscall_table->syscall_all_entry, enabled);
1135
1136 if (entryexit == LTTNG_KERNEL_ABI_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT)
1137 WRITE_ONCE(syscall_table->syscall_all_exit, enabled);
1138 }
1139
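/*
 * seq_file iterator exposing the native syscall table followed by the
 * compat syscall table through the syscall list file.
 */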
1140 static
1141 const struct trace_syscall_entry *syscall_list_get_entry(loff_t *pos)
1142 {
1143 const struct trace_syscall_entry *entry;
1144 int iter = 0;
1145
1146 for (entry = sc_table.table;
1147 entry < sc_table.table + sc_table.len;
1148 entry++) {
1149 if (iter++ >= *pos)
1150 return entry;
1151 }
1152 for (entry = compat_sc_table.table;
1153 entry < compat_sc_table.table + compat_sc_table.len;
1154 entry++) {
1155 if (iter++ >= *pos)
1156 return entry;
1157 }
1158 /* End of list */
1159 return NULL;
1160 }
1161
1162 static
1163 void *syscall_list_start(struct seq_file *m, loff_t *pos)
1164 {
1165 return (void *) syscall_list_get_entry(pos);
1166 }
1167
1168 static
1169 void *syscall_list_next(struct seq_file *m, void *p, loff_t *ppos)
1170 {
1171 (*ppos)++;
1172 return (void *) syscall_list_get_entry(ppos);
1173 }
1174
1175 static
1176 void syscall_list_stop(struct seq_file *m, void *p)
1177 {
1178 }
1179
1180 static
1181 int get_sc_table(const struct trace_syscall_entry *entry,
1182 const struct trace_syscall_entry **table,
1183 unsigned int *bitness)
1184 {
1185 if (entry >= sc_table.table && entry < sc_table.table + sc_table.len) {
1186 if (bitness)
1187 *bitness = BITS_PER_LONG;
1188 if (table)
1189 *table = sc_table.table;
1190 return 0;
1191 }
1192 if (!(entry >= compat_sc_table.table
1193 && entry < compat_sc_table.table + compat_sc_table.len)) {
1194 return -EINVAL;
1195 }
1196 if (bitness)
1197 *bitness = 32;
1198 if (table)
1199 *table = compat_sc_table.table;
1200 return 0;
1201 }
1202
1203 static
1204 int syscall_list_show(struct seq_file *m, void *p)
1205 {
1206 const struct trace_syscall_entry *table, *entry = p;
1207 unsigned int bitness;
1208 unsigned long index;
1209 int ret;
1210 const char *name;
1211
1212 ret = get_sc_table(entry, &table, &bitness);
1213 if (ret)
1214 return ret;
1215 if (!entry->desc)
1216 return 0;
1217 if (table == sc_table.table) {
1218 index = entry - table;
1219 name = &entry->desc->event_name[strlen(SYSCALL_ENTRY_STR)];
1220 } else {
1221 index = (entry - table) + sc_table.len;
1222 name = &entry->desc->event_name[strlen(COMPAT_SYSCALL_ENTRY_STR)];
1223 }
1224 seq_printf(m, "syscall { index = %lu; name = %s; bitness = %u; };\n",
1225 index, name, bitness);
1226 return 0;
1227 }
1228
1229 static
1230 const struct seq_operations lttng_syscall_list_seq_ops = {
1231 .start = syscall_list_start,
1232 .next = syscall_list_next,
1233 .stop = syscall_list_stop,
1234 .show = syscall_list_show,
1235 };
1236
1237 static
1238 int lttng_syscall_list_open(struct inode *inode, struct file *file)
1239 {
1240 return seq_open(file, &lttng_syscall_list_seq_ops);
1241 }
1242
1243 const struct file_operations lttng_syscall_list_fops = {
1244 .owner = THIS_MODULE,
1245 .open = lttng_syscall_list_open,
1246 .read = seq_read,
1247 .llseek = seq_lseek,
1248 .release = seq_release,
1249 };
1250
1251 /*
1252 * A syscall is enabled if it is traced for either entry or exit.
1253 */
1254 long lttng_syscall_table_get_active_mask(struct lttng_kernel_syscall_table *syscall_table,
1255 struct lttng_kernel_abi_syscall_mask __user *usyscall_mask)
1256 {
1257 uint32_t len, sc_tables_len, bitmask_len;
1258 int ret = 0, bit;
1259 char *tmp_mask;
1260 struct lttng_syscall_filter *filter;
1261
1262 ret = get_user(len, &usyscall_mask->len);
1263 if (ret)
1264 return ret;
1265 sc_tables_len = get_sc_tables_len();
1266 bitmask_len = ALIGN(sc_tables_len, 8) >> 3;
1267 if (len < sc_tables_len) {
1268 return put_user(sc_tables_len, &usyscall_mask->len);
1269 }
1270 /* Array is large enough, we can copy array to user-space. */
1271 tmp_mask = kzalloc(bitmask_len, GFP_KERNEL);
1272 if (!tmp_mask)
1273 return -ENOMEM;
1274 filter = syscall_table->sc_filter;
1275
1276 for (bit = 0; bit < sc_table.len; bit++) {
1277 char state;
1278
1279 if (syscall_table->syscall_dispatch) {
1280 if (!(READ_ONCE(syscall_table->syscall_all_entry)
1281 || READ_ONCE(syscall_table->syscall_all_exit)) && filter)
1282 state = test_bit(bit, filter->sc_entry)
1283 || test_bit(bit, filter->sc_exit);
1284 else
1285 state = 1;
1286 } else {
1287 state = 0;
1288 }
1289 bt_bitfield_write_be(tmp_mask, char, bit, 1, state);
1290 }
1291 for (; bit < sc_tables_len; bit++) {
1292 char state;
1293
1294 if (syscall_table->compat_syscall_dispatch) {
1295 if (!(READ_ONCE(syscall_table->syscall_all_entry)
1296 || READ_ONCE(syscall_table->syscall_all_exit)) && filter)
1297 state = test_bit(bit - sc_table.len,
1298 filter->sc_compat_entry)
1299 || test_bit(bit - sc_table.len,
1300 filter->sc_compat_exit);
1301 else
1302 state = 1;
1303 } else {
1304 state = 0;
1305 }
1306 bt_bitfield_write_be(tmp_mask, char, bit, 1, state);
1307 }
1308 if (copy_to_user(usyscall_mask->mask, tmp_mask, bitmask_len))
1309 ret = -EFAULT;
1310 kfree(tmp_mask);
1311 return ret;
1312 }
1313
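/*
 * Create an anonymous file exposing the syscall list and return its file
 * descriptor to user-space.
 */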
1314 int lttng_abi_syscall_list(void)
1315 {
1316 struct file *syscall_list_file;
1317 int file_fd, ret;
1318
1319 file_fd = get_unused_fd_flags(0);
1320 if (file_fd < 0) {
1321 ret = file_fd;
1322 goto fd_error;
1323 }
1324
1325 syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
1326 &lttng_syscall_list_fops,
1327 NULL, O_RDWR);
1328 if (IS_ERR(syscall_list_file)) {
1329 ret = PTR_ERR(syscall_list_file);
1330 goto file_error;
1331 }
1332 ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
1333 if (ret < 0)
1334 goto open_error;
1335 fd_install(file_fd, syscall_list_file);
1336 return file_fd;
1337
1338 open_error:
1339 fput(syscall_list_file);
1340 file_error:
1341 put_unused_fd(file_fd);
1342 fd_error:
1343 return ret;
1344 }