X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lttng-filter-validator.c;h=39cd25461fa9b2619c727873000a4ebf44008393;hb=ce5579a716bff80aa13d4647ab98482f403cc9ef;hp=cc8f45962c73f51c7a0c944579df6675a447aba0;hpb=3834b99f4341209754c4955ec853dc250b33ed4b;p=lttng-modules.git

diff --git a/lttng-filter-validator.c b/lttng-filter-validator.c
index cc8f4596..39cd2546 100644
--- a/lttng-filter-validator.c
+++ b/lttng-filter-validator.c
@@ -1,27 +1,10 @@
-/*
+/* SPDX-License-Identifier: MIT
+ *
  * lttng-filter-validator.c
  *
  * LTTng modules filter bytecode validator.
  *
  * Copyright (C) 2010-2016 Mathieu Desnoyers
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
  */
 
 #include 
@@ -303,6 +286,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
 	}
 
 	case FILTER_OP_RETURN:
+	case FILTER_OP_RETURN_S64:
 	{
 		if (unlikely(pc + sizeof(struct return_op)
 				> start_pc + bytecode->len)) {
@@ -317,8 +301,6 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
 	case FILTER_OP_MOD:
 	case FILTER_OP_PLUS:
 	case FILTER_OP_MINUS:
-	case FILTER_OP_RSHIFT:
-	case FILTER_OP_LSHIFT:
 	case FILTER_OP_EQ_DOUBLE:
 	case FILTER_OP_NE_DOUBLE:
 	case FILTER_OP_GT_DOUBLE:
@@ -372,6 +354,8 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
 	case FILTER_OP_LT_S64:
 	case FILTER_OP_GE_S64:
 	case FILTER_OP_LE_S64:
+	case FILTER_OP_BIT_RSHIFT:
+	case FILTER_OP_BIT_LSHIFT:
 	case FILTER_OP_BIT_AND:
 	case FILTER_OP_BIT_OR:
 	case FILTER_OP_BIT_XOR:
@@ -390,6 +374,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
 	case FILTER_OP_UNARY_PLUS_S64:
 	case FILTER_OP_UNARY_MINUS_S64:
 	case FILTER_OP_UNARY_NOT_S64:
+	case FILTER_OP_UNARY_BIT_NOT:
 	{
 		if (unlikely(pc + sizeof(struct unary_op)
 				> start_pc + bytecode->len)) {
@@ -409,21 +394,9 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
 		break;
 	}
 
-	/* load field ref */
+	/* load field and get context ref */
 	case FILTER_OP_LOAD_FIELD_REF:
-	{
-		printk(KERN_WARNING "Unknown field ref type\n");
-		ret = -EINVAL;
-		break;
-	}
-
-	/* get context ref */
 	case FILTER_OP_GET_CONTEXT_REF:
-	{
-		printk(KERN_WARNING "Unknown field ref type\n");
-		ret = -EINVAL;
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_REF_STRING:
 	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
 	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
@@ -512,6 +485,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
 		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
 				> start_pc + bytecode->len)) {
 			ret = -ERANGE;
+			break;
 		}
 		ret = validate_get_symbol(bytecode, sym);
 		break;
@@ -585,6 +559,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode,
 	}
 
 	case FILTER_OP_RETURN:
+	case FILTER_OP_RETURN_S64:
 	{
 		goto end;
 	}
@@ -595,8 +570,6 @@ int validate_instruction_context(struct bytecode_runtime *bytecode,
 	case FILTER_OP_MOD:
 	case FILTER_OP_PLUS:
 	case FILTER_OP_MINUS:
-	case FILTER_OP_RSHIFT:
-	case FILTER_OP_LSHIFT:
 	/* Floating point */
 	case FILTER_OP_EQ_DOUBLE:
 	case FILTER_OP_NE_DOUBLE:
@@ -733,6 +706,16 @@ int validate_instruction_context(struct bytecode_runtime *bytecode,
 		break;
 	}
 
+	case FILTER_OP_BIT_RSHIFT:
+		ret = bin_op_bitwise_check(stack, opcode, ">>");
+		if (ret < 0)
+			goto end;
+		break;
+	case FILTER_OP_BIT_LSHIFT:
+		ret = bin_op_bitwise_check(stack, opcode, "<<");
+		if (ret < 0)
+			goto end;
+		break;
 	case FILTER_OP_BIT_AND:
 		ret = bin_op_bitwise_check(stack, opcode, "&");
 		if (ret < 0)
@@ -777,6 +760,32 @@ int validate_instruction_context(struct bytecode_runtime *bytecode,
 		}
 		break;
 	}
+	case FILTER_OP_UNARY_BIT_NOT:
+	{
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		default:
+			printk(KERN_WARNING "unknown register type\n");
+			ret = -EINVAL;
+			goto end;
+
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+		case REG_DOUBLE:
+			printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
+			ret = -EINVAL;
+			goto end;
+		case REG_S64:
+			break;
+		case REG_TYPE_UNKNOWN:
+			break;
+		}
+		break;
+	}
 
 	case FILTER_OP_UNARY_PLUS_S64:
 	case FILTER_OP_UNARY_MINUS_S64:
@@ -955,60 +964,30 @@ int validate_instruction_context(struct bytecode_runtime *bytecode,
 		dbg_printk("Validate load field\n");
 		break;
 	}
+
+	/*
+	 * Disallow already specialized bytecode op load field instructions to
+	 * ensure that the received bytecode does not:
+	 *
+	 * - Read user-space memory without proper get_user accessors,
+	 * - Read a memory area larger than the memory targeted by the instrumentation.
+	 */
 	case FILTER_OP_LOAD_FIELD_S8:
-	{
-		dbg_printk("Validate load field s8\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_S16:
-	{
-		dbg_printk("Validate load field s16\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_S32:
-	{
-		dbg_printk("Validate load field s32\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_S64:
-	{
-		dbg_printk("Validate load field s64\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_U8:
-	{
-		dbg_printk("Validate load field u8\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_U16:
-	{
-		dbg_printk("Validate load field u16\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_U32:
-	{
-		dbg_printk("Validate load field u32\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_U64:
-	{
-		dbg_printk("Validate load field u64\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_STRING:
-	{
-		dbg_printk("Validate load field string\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_SEQUENCE:
-	{
-		dbg_printk("Validate load field sequence\n");
-		break;
-	}
 	case FILTER_OP_LOAD_FIELD_DOUBLE:
 	{
-		dbg_printk("Validate load field double\n");
-		break;
+		dbg_printk("Validate load field, reject specialized load instruction (%d)\n",
+			(int) opcode);
+		ret = -EINVAL;
+		goto end;
 	}
 
 	case FILTER_OP_GET_SYMBOL:
@@ -1100,6 +1079,235 @@ int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
 	return 0;
 }
 
+/*
+ * Validate load instructions: specialized instructions not accepted as input.
+ *
+ * Return value:
+ * >0: going to next insn.
+ * 0: success, stop iteration.
+ * <0: error
+ */
+static
+int validate_load(char **_next_pc,
+		char *pc)
+{
+	int ret = 0;
+	char *next_pc = *_next_pc;
+
+	switch (*(filter_opcode_t *) pc) {
+	case FILTER_OP_UNKNOWN:
+	default:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+			(unsigned int) *(filter_opcode_t *) pc);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	case FILTER_OP_RETURN:
+	{
+		next_pc += sizeof(struct return_op);
+		break;
+	}
+
+	case FILTER_OP_RETURN_S64:
+	{
+		next_pc += sizeof(struct return_op);
+		break;
+	}
+
+	/* binary */
+	case FILTER_OP_MUL:
+	case FILTER_OP_DIV:
+	case FILTER_OP_MOD:
+	case FILTER_OP_PLUS:
+	case FILTER_OP_MINUS:
+	/* Floating point */
+	case FILTER_OP_EQ_DOUBLE:
+	case FILTER_OP_NE_DOUBLE:
+	case FILTER_OP_GT_DOUBLE:
+	case FILTER_OP_LT_DOUBLE:
+	case FILTER_OP_GE_DOUBLE:
+	case FILTER_OP_LE_DOUBLE:
+	case FILTER_OP_EQ_DOUBLE_S64:
+	case FILTER_OP_NE_DOUBLE_S64:
+	case FILTER_OP_GT_DOUBLE_S64:
+	case FILTER_OP_LT_DOUBLE_S64:
+	case FILTER_OP_GE_DOUBLE_S64:
+	case FILTER_OP_LE_DOUBLE_S64:
+	case FILTER_OP_EQ_S64_DOUBLE:
+	case FILTER_OP_NE_S64_DOUBLE:
+	case FILTER_OP_GT_S64_DOUBLE:
+	case FILTER_OP_LT_S64_DOUBLE:
+	case FILTER_OP_GE_S64_DOUBLE:
+	case FILTER_OP_LE_S64_DOUBLE:
+	case FILTER_OP_UNARY_PLUS_DOUBLE:
+	case FILTER_OP_UNARY_MINUS_DOUBLE:
+	case FILTER_OP_UNARY_NOT_DOUBLE:
+	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
+	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
+	case FILTER_OP_LOAD_DOUBLE:
+	case FILTER_OP_CAST_DOUBLE_TO_S64:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
+			(unsigned int) *(filter_opcode_t *) pc);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	case FILTER_OP_EQ:
+	case FILTER_OP_NE:
+	case FILTER_OP_GT:
+	case FILTER_OP_LT:
+	case FILTER_OP_GE:
+	case FILTER_OP_LE:
+	case FILTER_OP_EQ_STRING:
+	case FILTER_OP_NE_STRING:
+	case FILTER_OP_GT_STRING:
+	case FILTER_OP_LT_STRING:
+	case FILTER_OP_GE_STRING:
+	case FILTER_OP_LE_STRING:
+	case FILTER_OP_EQ_STAR_GLOB_STRING:
+	case FILTER_OP_NE_STAR_GLOB_STRING:
+	case FILTER_OP_EQ_S64:
+	case FILTER_OP_NE_S64:
+	case FILTER_OP_GT_S64:
+	case FILTER_OP_LT_S64:
+	case FILTER_OP_GE_S64:
+	case FILTER_OP_LE_S64:
+	case FILTER_OP_BIT_RSHIFT:
+	case FILTER_OP_BIT_LSHIFT:
+	case FILTER_OP_BIT_AND:
+	case FILTER_OP_BIT_OR:
+	case FILTER_OP_BIT_XOR:
+	{
+		next_pc += sizeof(struct binary_op);
+		break;
+	}
+
+	/* unary */
+	case FILTER_OP_UNARY_PLUS:
+	case FILTER_OP_UNARY_MINUS:
+	case FILTER_OP_UNARY_PLUS_S64:
+	case FILTER_OP_UNARY_MINUS_S64:
+	case FILTER_OP_UNARY_NOT_S64:
+	case FILTER_OP_UNARY_NOT:
+	case FILTER_OP_UNARY_BIT_NOT:
+	{
+		next_pc += sizeof(struct unary_op);
+		break;
+	}
+
+	/* logical */
+	case FILTER_OP_AND:
+	case FILTER_OP_OR:
+	{
+		next_pc += sizeof(struct logical_op);
+		break;
+	}
+
+	/* load field ref */
+	case FILTER_OP_LOAD_FIELD_REF:
+	/* get context ref */
+	case FILTER_OP_GET_CONTEXT_REF:
+	{
+		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+		break;
+	}
+	case FILTER_OP_LOAD_FIELD_REF_STRING:
+	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
+	case FILTER_OP_GET_CONTEXT_REF_STRING:
+	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
+	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+	case FILTER_OP_LOAD_FIELD_REF_S64:
+	case FILTER_OP_GET_CONTEXT_REF_S64:
+	{
+		/*
+		 * Reject specialized load field ref instructions.
+		 */
+		ret = -EINVAL;
+		goto end;
+	}
+
+	/* load from immediate operand */
+	case FILTER_OP_LOAD_STRING:
+	case FILTER_OP_LOAD_STAR_GLOB_STRING:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+
+		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+		break;
+	}
+
+	case FILTER_OP_LOAD_S64:
+	{
+		next_pc += sizeof(struct load_op) + sizeof(struct literal_numeric);
+		break;
+	}
+
+	case FILTER_OP_CAST_TO_S64:
+	case FILTER_OP_CAST_NOP:
+	{
+		next_pc += sizeof(struct cast_op);
+		break;
+	}
+
+	/*
+	 * Instructions for recursive traversal through composed types.
+	 */
+	case FILTER_OP_GET_CONTEXT_ROOT:
+	case FILTER_OP_GET_APP_CONTEXT_ROOT:
+	case FILTER_OP_GET_PAYLOAD_ROOT:
+	case FILTER_OP_LOAD_FIELD:
+	{
+		next_pc += sizeof(struct load_op);
+		break;
+	}
+
+	case FILTER_OP_LOAD_FIELD_S8:
+	case FILTER_OP_LOAD_FIELD_S16:
+	case FILTER_OP_LOAD_FIELD_S32:
+	case FILTER_OP_LOAD_FIELD_S64:
+	case FILTER_OP_LOAD_FIELD_U8:
+	case FILTER_OP_LOAD_FIELD_U16:
+	case FILTER_OP_LOAD_FIELD_U32:
+	case FILTER_OP_LOAD_FIELD_U64:
+	case FILTER_OP_LOAD_FIELD_STRING:
+	case FILTER_OP_LOAD_FIELD_SEQUENCE:
+	case FILTER_OP_LOAD_FIELD_DOUBLE:
+	{
+		/*
+		 * Reject specialized load field instructions.
+		 */
+		ret = -EINVAL;
+		goto end;
+	}
+
+	case FILTER_OP_GET_SYMBOL:
+	case FILTER_OP_GET_SYMBOL_FIELD:
+	{
+		next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+		break;
+	}
+
+	case FILTER_OP_GET_INDEX_U16:
+	{
+		next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+		break;
+	}
+
+	case FILTER_OP_GET_INDEX_U64:
+	{
+		next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+		break;
+	}
+
+	}
+end:
+	*_next_pc = next_pc;
+	return ret;
+}
+
 /*
  * Return value:
  * >0: going to next insn.
@@ -1148,14 +1356,34 @@ int exec_insn(struct bytecode_runtime *bytecode,
 		goto end;
 	}
 
+	case FILTER_OP_RETURN_S64:
+	{
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+			break;
+		default:
+		case REG_TYPE_UNKNOWN:
+			printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		ret = 0;
+		goto end;
+	}
+
 	/* binary */
 	case FILTER_OP_MUL:
 	case FILTER_OP_DIV:
 	case FILTER_OP_MOD:
 	case FILTER_OP_PLUS:
 	case FILTER_OP_MINUS:
-	case FILTER_OP_RSHIFT:
-	case FILTER_OP_LSHIFT:
 	/* Floating point */
 	case FILTER_OP_EQ_DOUBLE:
 	case FILTER_OP_NE_DOUBLE:
@@ -1209,6 +1437,8 @@ int exec_insn(struct bytecode_runtime *bytecode,
 	case FILTER_OP_LT_S64:
 	case FILTER_OP_GE_S64:
 	case FILTER_OP_LE_S64:
+	case FILTER_OP_BIT_RSHIFT:
+	case FILTER_OP_BIT_LSHIFT:
 	case FILTER_OP_BIT_AND:
 	case FILTER_OP_BIT_OR:
 	case FILTER_OP_BIT_XOR:
@@ -1317,6 +1547,31 @@ int exec_insn(struct bytecode_runtime *bytecode,
 		break;
 	}
 
+	case FILTER_OP_UNARY_BIT_NOT:
+	{
+		/* Pop 1, push 1 */
+		if (!vstack_ax(stack)) {
+			printk(KERN_WARNING "Empty stack\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		switch (vstack_ax(stack)->type) {
+		case REG_S64:
+		case REG_TYPE_UNKNOWN:
+			break;
+		case REG_DOUBLE:
+		default:
+			printk(KERN_WARNING "Unexpected register type %d for operation\n",
+				(int) vstack_ax(stack)->type);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		vstack_ax(stack)->type = REG_S64;
+		next_pc += sizeof(struct unary_op);
+		break;
+	}
+
 	/* logical */
 	case FILTER_OP_AND:
 	case FILTER_OP_OR:
@@ -1620,6 +1875,32 @@ end:
 	return ret;
 }
 
+int lttng_filter_validate_bytecode_load(struct bytecode_runtime *bytecode)
+{
+	char *pc, *next_pc, *start_pc;
+	int ret = -EINVAL;
+
+	start_pc = &bytecode->code[0];
+	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+			pc = next_pc) {
+		ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+		if (ret != 0) {
+			if (ret == -ERANGE)
+				printk(KERN_WARNING "LTTng: bytecode: bytecode overflow\n");
+			goto end;
+		}
+		dbg_printk("Validating loads: op %s (%u)\n",
+			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
+			(unsigned int) *(filter_opcode_t *) pc);
+
+		ret = validate_load(&next_pc, pc);
+		if (ret)
+			goto end;
+	}
+end:
+	return ret;
+}
+
 /*
  * Never called concurrently (hash seed is shared).
  */
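
Not shown in this diff: the new lttng_filter_validate_bytecode_load() pass only takes effect if the bytecode linking code calls it before the bytecode is specialized and interpreted. The sketch below illustrates such a call site; the surrounding function name and the comment about the remaining link steps are assumptions for illustration, not part of this change.

	/* Hypothetical caller in the bytecode linking path (illustration only). */
	static int link_filter_bytecode(struct bytecode_runtime *runtime)
	{
		int ret;

		/* Reject specialized load instructions received from user space. */
		ret = lttng_filter_validate_bytecode_load(runtime);
		if (ret)
			return ret;
		/* Specialization and full validation of the bytecode would follow here. */
		return 0;
	}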