-/*
+/* SPDX-License-Identifier: MIT
+ *
* lttng-filter-validator.c
*
* LTTng modules filter bytecode validator.
*
- * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#include <linux/list.h>
+#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/slab.h>
-#include "lttng-filter.h"
+#include <wrapper/list.h>
+#include <lttng-filter.h>
#define MERGE_POINT_TABLE_BITS 7
#define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
- hlist_for_each_entry(lookup_node, head, node) {
+ lttng_hlist_for_each_entry(lookup_node, head, node) {
if (lttng_hash_match(lookup_node, target_pc)) {
found = 1;
break;
target_pc);
return -EINVAL;
}
+ } else {
+ hlist_add_head(&mp_node->node, head);
}
- hlist_add_head(&mp_node->node, head);
return 0;
}
* Binary comparators use top of stack and top of stack -1.
*/
static
-int bin_op_compare_check(struct vstack *stack, const char *str)
+int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
+ const char *str)
{
if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_unknown;
+ goto error_empty;
switch (vstack_ax(stack)->type) {
default:
case REG_DOUBLE:
- goto error_unknown;
+ goto error_type;
case REG_STRING:
switch (vstack_bx(stack)->type) {
default:
case REG_DOUBLE:
- goto error_unknown;
-
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
+ goto error_mismatch;
+ }
+ break;
+ case REG_S64:
+ goto error_mismatch;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (vstack_bx(stack)->type) {
+ default:
+ case REG_DOUBLE:
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ goto unknown;
case REG_STRING:
+ if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
+ goto error_mismatch;
+ }
break;
+ case REG_STAR_GLOB_STRING:
case REG_S64:
goto error_mismatch;
}
switch (vstack_bx(stack)->type) {
default:
case REG_DOUBLE:
- goto error_unknown;
-
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ goto unknown;
case REG_STRING:
+ case REG_STAR_GLOB_STRING:
goto error_mismatch;
-
case REG_S64:
break;
}
break;
+ case REG_TYPE_UNKNOWN:
+ switch (vstack_bx(stack)->type) {
+ default:
+ case REG_DOUBLE:
+ goto error_type;
+ case REG_TYPE_UNKNOWN:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_S64:
+ goto unknown;
+ }
+ break;
}
return 0;
-error_unknown:
+unknown:
+ return 1;
+
+error_empty:
+ printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
return -EINVAL;
error_mismatch:
printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
return -EINVAL;
+
+error_type:
+ printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
+		const char *str)
+{
+	/*
+	 * NOTE(review): @opcode is currently unused here; presumably kept
+	 * for signature symmetry with bin_op_compare_check — confirm.
+	 */
+	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+		goto error_empty;
+
+	switch (vstack_ax(stack)->type) {
+	default:
+	case REG_DOUBLE:
+		goto error_type;
+
+	case REG_TYPE_UNKNOWN:
+		/*
+		 * Top of stack has dynamic type: defer the type check to
+		 * runtime for any plausible bx type.
+		 */
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+		case REG_S64:
+			goto unknown;
+		}
+		break;
+	case REG_S64:
+		/* Bitwise ops are only valid on two S64 operands. */
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+			goto unknown;
+		case REG_S64:
+			break;
+		}
+		break;
+	}
+	return 0;
+
+unknown:
+	return 1;
+
+error_empty:
+	printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
+	return -EINVAL;
+
+error_type:
+	printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
+	return -EINVAL;
+}
+
+/*
+ * Validate a get_symbol operand: the symbol offset must fall within the
+ * relocation area of the bytecode, and the symbol string must be
+ * NUL-terminated before the end of the bytecode buffer.
+ * Return 0 on success, -EINVAL on out-of-bounds or unterminated symbol.
+ */
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+		const struct get_symbol *sym)
+{
+	const char *str, *str_limit;
+	size_t len_limit;
+
+	/* Reject offsets pointing past the end of the reloc table. */
+	if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+		return -EINVAL;
+
+	str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+	str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+	len_limit = str_limit - str;
+	/* strnlen == len_limit means no NUL byte before the buffer end. */
+	if (strnlen(str, len_limit) == len_limit)
+		return -EINVAL;
+	return 0;
+}
/*
*/
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
- void *start_pc, void *pc)
+ char *start_pc, char *pc)
{
int ret = 0;
}
case FILTER_OP_RETURN:
+ case FILTER_OP_RETURN_S64:
{
if (unlikely(pc + sizeof(struct return_op)
> start_pc + bytecode->len)) {
case FILTER_OP_MOD:
case FILTER_OP_PLUS:
case FILTER_OP_MINUS:
- case FILTER_OP_RSHIFT:
- case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
case FILTER_OP_EQ_DOUBLE:
case FILTER_OP_NE_DOUBLE:
case FILTER_OP_GT_DOUBLE:
case FILTER_OP_LT_STRING:
case FILTER_OP_GE_STRING:
case FILTER_OP_LE_STRING:
+ case FILTER_OP_EQ_STAR_GLOB_STRING:
+ case FILTER_OP_NE_STAR_GLOB_STRING:
case FILTER_OP_EQ_S64:
case FILTER_OP_NE_S64:
case FILTER_OP_GT_S64:
case FILTER_OP_LT_S64:
case FILTER_OP_GE_S64:
case FILTER_OP_LE_S64:
+ case FILTER_OP_BIT_RSHIFT:
+ case FILTER_OP_BIT_LSHIFT:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
if (unlikely(pc + sizeof(struct binary_op)
> start_pc + bytecode->len)) {
case FILTER_OP_UNARY_PLUS_S64:
case FILTER_OP_UNARY_MINUS_S64:
case FILTER_OP_UNARY_NOT_S64:
+ case FILTER_OP_UNARY_BIT_NOT:
{
if (unlikely(pc + sizeof(struct unary_op)
> start_pc + bytecode->len)) {
break;
}
- /* load field ref */
+ /* load field and get context ref */
case FILTER_OP_LOAD_FIELD_REF:
- {
- printk(KERN_WARNING "Unknown field ref type\n");
- ret = -EINVAL;
- break;
- }
- /* get context ref */
case FILTER_OP_GET_CONTEXT_REF:
- {
- printk(KERN_WARNING "Unknown field ref type\n");
- ret = -EINVAL;
- break;
- }
case FILTER_OP_LOAD_FIELD_REF_STRING:
case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
+ case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
+ case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
case FILTER_OP_LOAD_FIELD_REF_S64:
case FILTER_OP_GET_CONTEXT_REF_STRING:
case FILTER_OP_GET_CONTEXT_REF_S64:
/* load from immediate operand */
case FILTER_OP_LOAD_STRING:
+ case FILTER_OP_LOAD_STAR_GLOB_STRING:
{
struct load_op *insn = (struct load_op *) pc;
uint32_t str_len, maxlen;
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ case FILTER_OP_LOAD_FIELD:
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = validate_get_symbol(bytecode, sym);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ printk(KERN_WARNING "Unexpected get symbol field\n");
+ ret = -EINVAL;
+ break;
+
+ case FILTER_OP_GET_INDEX_U16:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case FILTER_OP_GET_INDEX_U64:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
}
return ret;
struct hlist_head *head;
head = &mp_table->mp_head[i];
- hlist_for_each_entry_safe(mp_node, tmp, head, node) {
+ lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
kfree(mp_node);
nr_nodes++;
}
/*
* Return value:
- * 0: success
+ * >=0: success
* <0: error
*/
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
struct vstack *stack,
- void *start_pc,
- void *pc)
+ char *start_pc,
+ char *pc)
{
int ret = 0;
+ const filter_opcode_t opcode = *(filter_opcode_t *) pc;
- switch (*(filter_opcode_t *) pc) {
+ switch (opcode) {
case FILTER_OP_UNKNOWN:
default:
{
}
case FILTER_OP_RETURN:
+ case FILTER_OP_RETURN_S64:
{
goto end;
}
case FILTER_OP_MOD:
case FILTER_OP_PLUS:
case FILTER_OP_MINUS:
- case FILTER_OP_RSHIFT:
- case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
/* Floating point */
case FILTER_OP_EQ_DOUBLE:
case FILTER_OP_NE_DOUBLE:
case FILTER_OP_EQ:
{
- ret = bin_op_compare_check(stack, "==");
- if (ret)
+ ret = bin_op_compare_check(stack, opcode, "==");
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_NE:
{
- ret = bin_op_compare_check(stack, "!=");
- if (ret)
+ ret = bin_op_compare_check(stack, opcode, "!=");
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_GT:
{
- ret = bin_op_compare_check(stack, ">");
- if (ret)
+ ret = bin_op_compare_check(stack, opcode, ">");
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_LT:
{
- ret = bin_op_compare_check(stack, "<");
- if (ret)
+ ret = bin_op_compare_check(stack, opcode, "<");
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_GE:
{
- ret = bin_op_compare_check(stack, ">=");
- if (ret)
+ ret = bin_op_compare_check(stack, opcode, ">=");
+ if (ret < 0)
goto end;
break;
}
case FILTER_OP_LE:
{
- ret = bin_op_compare_check(stack, "<=");
- if (ret)
+ ret = bin_op_compare_check(stack, opcode, "<=");
+ if (ret < 0)
goto end;
break;
}
break;
}
+
+ case FILTER_OP_EQ_STAR_GLOB_STRING:
+ case FILTER_OP_NE_STAR_GLOB_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ printk(KERN_WARNING "Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
+ && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
+ printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
case FILTER_OP_EQ_S64:
case FILTER_OP_NE_S64:
case FILTER_OP_GT_S64:
break;
}
+ case FILTER_OP_BIT_RSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, ">>");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_LSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, "<<");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_AND:
+ ret = bin_op_bitwise_check(stack, opcode, "&");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_OR:
+ ret = bin_op_bitwise_check(stack, opcode, "|");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_XOR:
+ ret = bin_op_bitwise_check(stack, opcode, "^");
+ if (ret < 0)
+ goto end;
+ break;
+
/* unary */
case FILTER_OP_UNARY_PLUS:
case FILTER_OP_UNARY_MINUS:
goto end;
case REG_STRING:
+ case REG_STAR_GLOB_STRING:
printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
ret = -EINVAL;
goto end;
+ case REG_S64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ }
+ break;
+ }
+ case FILTER_OP_UNARY_BIT_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_DOUBLE:
+ printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
+ ret = -EINVAL;
+ goto end;
case REG_S64:
break;
+ case REG_TYPE_UNKNOWN:
+ break;
}
break;
}
}
case FILTER_OP_LOAD_FIELD_REF_STRING:
case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
+ case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
+ case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
{
struct load_op *insn = (struct load_op *) pc;
struct field_ref *ref = (struct field_ref *) insn->data;
/* load from immediate operand */
case FILTER_OP_LOAD_STRING:
+ case FILTER_OP_LOAD_STAR_GLOB_STRING:
{
break;
}
goto end;
case REG_STRING:
+ case REG_STAR_GLOB_STRING:
printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
ret = -EINVAL;
goto end;
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ {
+ dbg_printk("Validate get context root\n");
+ break;
+ }
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ {
+ dbg_printk("Validate get app context root\n");
+ break;
+ }
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ dbg_printk("Validate get payload root\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD:
+ {
+ /*
+ * We tolerate that field type is unknown at validation,
+ * because we are performing the load specialization in
+ * a phase after validation.
+ */
+ dbg_printk("Validate load field\n");
+ break;
+ }
+
+ /*
+ * Disallow already specialized bytecode op load field instructions to
+ * ensure that the received bytecode does not:
+ *
+ * - Read user-space memory without proper get_user accessors,
+ * - Read a memory area larger than the memory targeted by the instrumentation.
+ */
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ dbg_printk("Validate load field, reject specialized load instruction (%d)\n",
+ (int) opcode);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printk("Validate get symbol offset %u\n", sym->offset);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printk("Validate get symbol field offset %u\n", sym->offset);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+ dbg_printk("Validate get index u16 index %u\n", get_index->index);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+ dbg_printk("Validate get index u64 index %llu\n",
+ (unsigned long long) get_index->index);
+ break;
+ }
}
end:
return ret;
int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
struct mp_table *mp_table,
struct vstack *stack,
- void *start_pc,
- void *pc)
+ char *start_pc,
+ char *pc)
{
int ret, found = 0;
unsigned long target_pc = pc - start_pc;
/* Validate the context resulting from the previous instruction */
ret = validate_instruction_context(bytecode, stack, start_pc, pc);
- if (ret)
+ if (ret < 0)
return ret;
/* Validate merge points */
hash = jhash_1word(target_pc, 0);
head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
- hlist_for_each_entry(mp_node, head, node) {
+ lttng_hlist_for_each_entry(mp_node, head, node) {
if (lttng_hash_match(mp_node, target_pc)) {
found = 1;
break;
}
/*
+ * Validate load instructions: specialized instructions not accepted as input.
+ *
* Return value:
* >0: going to next insn.
* 0: success, stop iteration.
* <0: error
*/
static
-int exec_insn(struct bytecode_runtime *bytecode,
- struct mp_table *mp_table,
- struct vstack *stack,
- void **_next_pc,
- void *pc)
+int validate_load(char **_next_pc,
+ char *pc)
{
- int ret = 1;
- void *next_pc = *_next_pc;
+ int ret = 0;
+ char *next_pc = *_next_pc;
switch (*(filter_opcode_t *) pc) {
case FILTER_OP_UNKNOWN:
default:
{
- printk(KERN_WARNING "unknown bytecode op %u\n",
+ printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
(unsigned int) *(filter_opcode_t *) pc);
ret = -EINVAL;
goto end;
case FILTER_OP_RETURN:
{
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- ret = 0;
- goto end;
+ next_pc += sizeof(struct return_op);
+ break;
+ }
+
+ case FILTER_OP_RETURN_S64:
+ {
+ next_pc += sizeof(struct return_op);
+ break;
}
/* binary */
case FILTER_OP_MOD:
case FILTER_OP_PLUS:
case FILTER_OP_MINUS:
- case FILTER_OP_RSHIFT:
- case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
/* Floating point */
case FILTER_OP_EQ_DOUBLE:
case FILTER_OP_NE_DOUBLE:
case FILTER_OP_LOAD_DOUBLE:
case FILTER_OP_CAST_DOUBLE_TO_S64:
{
- printk(KERN_WARNING "unsupported bytecode op %u\n",
+ printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
(unsigned int) *(filter_opcode_t *) pc);
ret = -EINVAL;
goto end;
case FILTER_OP_LT_STRING:
case FILTER_OP_GE_STRING:
case FILTER_OP_LE_STRING:
+ case FILTER_OP_EQ_STAR_GLOB_STRING:
+ case FILTER_OP_NE_STAR_GLOB_STRING:
case FILTER_OP_EQ_S64:
case FILTER_OP_NE_S64:
case FILTER_OP_GT_S64:
case FILTER_OP_LT_S64:
case FILTER_OP_GE_S64:
case FILTER_OP_LE_S64:
+ case FILTER_OP_BIT_RSHIFT:
+ case FILTER_OP_BIT_LSHIFT:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct binary_op);
break;
}
/* unary */
case FILTER_OP_UNARY_PLUS:
case FILTER_OP_UNARY_MINUS:
- case FILTER_OP_UNARY_NOT:
case FILTER_OP_UNARY_PLUS_S64:
case FILTER_OP_UNARY_MINUS_S64:
case FILTER_OP_UNARY_NOT_S64:
+ case FILTER_OP_UNARY_NOT:
+ case FILTER_OP_UNARY_BIT_NOT:
{
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct unary_op);
break;
}
case FILTER_OP_AND:
case FILTER_OP_OR:
{
- struct logical_op *insn = (struct logical_op *) pc;
- int merge_ret;
+ next_pc += sizeof(struct logical_op);
+ break;
+ }
- /* Add merge point to table */
- merge_ret = merge_point_add_check(mp_table,
- insn->skip_offset, stack);
+ /* load field ref */
+ case FILTER_OP_LOAD_FIELD_REF:
+ /* get context ref */
+ case FILTER_OP_GET_CONTEXT_REF:
+ {
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_REF_STRING:
+ case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
+ case FILTER_OP_GET_CONTEXT_REF_STRING:
+ case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
+ case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+ case FILTER_OP_LOAD_FIELD_REF_S64:
+ case FILTER_OP_GET_CONTEXT_REF_S64:
+ {
+ /*
+ * Reject specialized load field ref instructions.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* load from immediate operand */
+ case FILTER_OP_LOAD_STRING:
+ case FILTER_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case FILTER_OP_LOAD_S64:
+ {
+ next_pc += sizeof(struct load_op) + sizeof(struct literal_numeric);
+ break;
+ }
+
+ case FILTER_OP_CAST_TO_S64:
+ case FILTER_OP_CAST_NOP:
+ {
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ case FILTER_OP_LOAD_FIELD:
+ {
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ /*
+ * Reject specialized load field instructions.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
+ }
+end:
+ *_next_pc = next_pc;
+ return ret;
+}
+
+/*
+ * Return value:
+ * >0: going to next insn.
+ * 0: success, stop iteration.
+ * <0: error
+ */
+static
+int exec_insn(struct bytecode_runtime *bytecode,
+ struct mp_table *mp_table,
+ struct vstack *stack,
+ char **_next_pc,
+ char *pc)
+{
+ int ret = 1;
+ char *next_pc = *_next_pc;
+
+ switch (*(filter_opcode_t *) pc) {
+ case FILTER_OP_UNKNOWN:
+ default:
+ {
+ printk(KERN_WARNING "unknown bytecode op %u\n",
+ (unsigned int) *(filter_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case FILTER_OP_RETURN:
+ {
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+
+ case FILTER_OP_RETURN_S64:
+ {
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ case REG_TYPE_UNKNOWN:
+ printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+
+ /* binary */
+ case FILTER_OP_MUL:
+ case FILTER_OP_DIV:
+ case FILTER_OP_MOD:
+ case FILTER_OP_PLUS:
+ case FILTER_OP_MINUS:
+ /* Floating point */
+ case FILTER_OP_EQ_DOUBLE:
+ case FILTER_OP_NE_DOUBLE:
+ case FILTER_OP_GT_DOUBLE:
+ case FILTER_OP_LT_DOUBLE:
+ case FILTER_OP_GE_DOUBLE:
+ case FILTER_OP_LE_DOUBLE:
+ case FILTER_OP_EQ_DOUBLE_S64:
+ case FILTER_OP_NE_DOUBLE_S64:
+ case FILTER_OP_GT_DOUBLE_S64:
+ case FILTER_OP_LT_DOUBLE_S64:
+ case FILTER_OP_GE_DOUBLE_S64:
+ case FILTER_OP_LE_DOUBLE_S64:
+ case FILTER_OP_EQ_S64_DOUBLE:
+ case FILTER_OP_NE_S64_DOUBLE:
+ case FILTER_OP_GT_S64_DOUBLE:
+ case FILTER_OP_LT_S64_DOUBLE:
+ case FILTER_OP_GE_S64_DOUBLE:
+ case FILTER_OP_LE_S64_DOUBLE:
+ case FILTER_OP_UNARY_PLUS_DOUBLE:
+ case FILTER_OP_UNARY_MINUS_DOUBLE:
+ case FILTER_OP_UNARY_NOT_DOUBLE:
+ case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
+ case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
+ case FILTER_OP_LOAD_DOUBLE:
+ case FILTER_OP_CAST_DOUBLE_TO_S64:
+ {
+ printk(KERN_WARNING "unsupported bytecode op %u\n",
+ (unsigned int) *(filter_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case FILTER_OP_EQ:
+ case FILTER_OP_NE:
+ case FILTER_OP_GT:
+ case FILTER_OP_LT:
+ case FILTER_OP_GE:
+ case FILTER_OP_LE:
+ case FILTER_OP_EQ_STRING:
+ case FILTER_OP_NE_STRING:
+ case FILTER_OP_GT_STRING:
+ case FILTER_OP_LT_STRING:
+ case FILTER_OP_GE_STRING:
+ case FILTER_OP_LE_STRING:
+ case FILTER_OP_EQ_STAR_GLOB_STRING:
+ case FILTER_OP_NE_STAR_GLOB_STRING:
+ case FILTER_OP_EQ_S64:
+ case FILTER_OP_NE_S64:
+ case FILTER_OP_GT_S64:
+ case FILTER_OP_LT_S64:
+ case FILTER_OP_GE_S64:
+ case FILTER_OP_LE_S64:
+ case FILTER_OP_BIT_RSHIFT:
+ case FILTER_OP_BIT_LSHIFT:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ /* unary */
+ case FILTER_OP_UNARY_PLUS:
+ case FILTER_OP_UNARY_MINUS:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case FILTER_OP_UNARY_PLUS_S64:
+ case FILTER_OP_UNARY_MINUS_S64:
+ case FILTER_OP_UNARY_NOT_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case FILTER_OP_UNARY_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case FILTER_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ case REG_DOUBLE:
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ /* logical */
+ case FILTER_OP_AND:
+ case FILTER_OP_OR:
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+ int merge_ret;
+
+ /* Add merge point to table */
+ merge_ret = merge_point_add_check(mp_table,
+ insn->skip_offset, stack);
if (merge_ret) {
ret = merge_ret;
goto end;
}
+
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* There is always a cast-to-s64 operation before a or/and op. */
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ printk(KERN_WARNING "Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
/* Continue to next instruction */
/* Pop 1 when jump not taken */
if (vstack_pop(stack)) {
case FILTER_OP_LOAD_FIELD_REF_STRING:
case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
case FILTER_OP_GET_CONTEXT_REF_STRING:
+ case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
+ case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
{
if (vstack_push(stack)) {
ret = -EINVAL;
break;
}
+ case FILTER_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
case FILTER_OP_LOAD_S64:
{
if (vstack_push(stack)) {
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_DOUBLE:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Incorrect register type %d for cast\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct cast_op);
break;
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
}
end:
*_next_pc = next_pc;
return ret;
}
+/*
+ * First validation pass over the bytecode stream: check each instruction
+ * for buffer overflow (bytecode_validate_overflow) and reject
+ * already-specialized load instructions (validate_load) before the
+ * full context validation pass runs.
+ * Return 0 on success, negative error value on failure.
+ */
+int lttng_filter_validate_bytecode_load(struct bytecode_runtime *bytecode)
+{
+	char *pc, *next_pc, *start_pc;
+	int ret = -EINVAL;
+
+	start_pc = &bytecode->code[0];
+	/* validate_load() advances next_pc by each instruction's size. */
+	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+			pc = next_pc) {
+		ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+		if (ret != 0) {
+			if (ret == -ERANGE)
+				printk(KERN_WARNING "LTTng: bytecode: bytecode overflow\n");
+			goto end;
+		}
+		dbg_printk("Validating loads: op %s (%u)\n",
+			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
+			(unsigned int) *(filter_opcode_t *) pc);
+
+		ret = validate_load(&next_pc, pc);
+		if (ret)
+			goto end;
+	}
+end:
+	return ret;
+}
+
/*
* Never called concurrently (hash seed is shared).
*/
int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
{
struct mp_table *mp_table;
- void *pc, *next_pc, *start_pc;
+ char *pc, *next_pc, *start_pc;
int ret = -EINVAL;
struct vstack stack;
printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
return -ENOMEM;
}
- start_pc = &bytecode->data[0];
+ start_pc = &bytecode->code[0];
for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
pc = next_pc) {
ret = bytecode_validate_overflow(bytecode, start_pc, pc);
/*
* For each instruction, validate the current context
* (traversal of entire execution flow), and validate
- * all merge points targeting this instruction.
+ * all merge points targeting this instruction.
*/
ret = validate_instruction_all_contexts(bytecode, mp_table,
&stack, start_pc, pc);