diff --git a/benches/vm_execution.rs b/benches/vm_execution.rs index bbecee34..505f927f 100644 --- a/benches/vm_execution.rs +++ b/benches/vm_execution.rs @@ -163,7 +163,7 @@ static ADDRESS_TRANSLATION_STACK_CODE: &str = " and r1, 4095 mov r3, r10 sub r3, r1 - sub r3, 1 + add r3, -1 ldxb r4, [r3] add r2, 1 jlt r2, 0x10000, -8 @@ -234,7 +234,7 @@ fn bench_jit_vs_interpreter_call_depth_fixed(bencher: &mut Bencher) { jgt r6, 0, +1 exit mov r1, r6 - sub r1, 1 + add r1, -1 call function_foo exit", Config { @@ -264,7 +264,7 @@ fn bench_jit_vs_interpreter_call_depth_dynamic(bencher: &mut Bencher) { mov r6, r1 jeq r6, 0, +3 mov r1, r6 - sub r1, 1 + add r1, -1 call function_foo add r11, 4 exit", diff --git a/src/ebpf.rs b/src/ebpf.rs index 7c118c74..06c98973 100644 --- a/src/ebpf.rs +++ b/src/ebpf.rs @@ -242,7 +242,7 @@ pub const ST_DW_XADD: u8 = BPF_STX | BPF_XADD | BPF_DW; pub const ADD32_IMM: u8 = BPF_ALU | BPF_K | BPF_ADD; /// BPF opcode: `add32 dst, src` /// `dst += src`. pub const ADD32_REG: u8 = BPF_ALU | BPF_X | BPF_ADD; -/// BPF opcode: `sub32 dst, imm` /// `dst -= imm`. +/// BPF opcode: `sub32 dst, imm` /// `dst = imm - dst` in SBPFv2 and later; `dst -= imm` in SBPFv1. pub const SUB32_IMM: u8 = BPF_ALU | BPF_K | BPF_SUB; /// BPF opcode: `sub32 dst, src` /// `dst -= src`. 
pub const SUB32_REG: u8 = BPF_ALU | BPF_X | BPF_SUB; diff --git a/src/elf.rs b/src/elf.rs index c3c6d936..ee53c2b9 100644 --- a/src/elf.rs +++ b/src/elf.rs @@ -220,6 +220,16 @@ pub enum SBPFVersion { } impl SBPFVersion { + /// Enable the negation instruction + pub fn enable_neg(&self) -> bool { + self == &SBPFVersion::V1 + } + + /// Swaps the reg and imm operands of the subtraction instruction + pub fn swap_sub_reg_imm_operands(&self) -> bool { + self != &SBPFVersion::V1 + } + /// Disable the only two slots long instruction: LD_DW_IMM pub fn disable_lddw(&self) -> bool { self != &SBPFVersion::V1 diff --git a/src/interpreter.rs b/src/interpreter.rs index 62c87f9e..558caf95 100644 --- a/src/interpreter.rs +++ b/src/interpreter.rs @@ -264,7 +264,11 @@ impl<'a, 'b, V: Verifier, C: ContextObject> Interpreter<'a, 'b, V, C> { // BPF_ALU class ebpf::ADD32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(insn.imm as i32) as u64, ebpf::ADD32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(self.reg[src] as i32) as u64, - ebpf::SUB32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(insn.imm as i32) as u64, + ebpf::SUB32_IMM => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { + self.reg[dst] = (insn.imm as i32).wrapping_sub(self.reg[dst] as i32) as u64 + } else { + self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(insn.imm as i32) as u64 + }, ebpf::SUB32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32) as u64, ebpf::MUL32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64, ebpf::MUL32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64, @@ -298,7 +302,7 @@ impl<'a, 'b, V: Verifier, C: ContextObject> Interpreter<'a, 'b, V, C> { ebpf::LSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(self.reg[src] as u32) as u64, ebpf::RSH32_IMM => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(insn.imm as u32) as u64, 
ebpf::RSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(self.reg[src] as u32) as u64, - ebpf::NEG32 => self.reg[dst] = (self.reg[dst] as i32).wrapping_neg() as u64 & (u32::MAX as u64), + ebpf::NEG32 if self.executable.get_sbpf_version().enable_neg() => self.reg[dst] = (self.reg[dst] as i32).wrapping_neg() as u64 & (u32::MAX as u64), ebpf::MOD32_IMM => self.reg[dst] = (self.reg[dst] as u32 % insn.imm as u32) as u64, ebpf::MOD32_REG => { if self.reg[src] as u32 == 0 { @@ -336,7 +340,11 @@ impl<'a, 'b, V: Verifier, C: ContextObject> Interpreter<'a, 'b, V, C> { // BPF_ALU64 class ebpf::ADD64_IMM => self.reg[dst] = self.reg[dst].wrapping_add(insn.imm as u64), ebpf::ADD64_REG => self.reg[dst] = self.reg[dst].wrapping_add(self.reg[src]), - ebpf::SUB64_IMM => self.reg[dst] = self.reg[dst].wrapping_sub(insn.imm as u64), + ebpf::SUB64_IMM => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { + self.reg[dst] = (insn.imm as u64).wrapping_sub(self.reg[dst]) + } else { + self.reg[dst] = self.reg[dst].wrapping_sub(insn.imm as u64) + }, ebpf::SUB64_REG => self.reg[dst] = self.reg[dst].wrapping_sub(self.reg[src]), ebpf::MUL64_IMM => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64), ebpf::MUL64_REG => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]), @@ -370,7 +378,7 @@ impl<'a, 'b, V: Verifier, C: ContextObject> Interpreter<'a, 'b, V, C> { ebpf::LSH64_REG => self.reg[dst] = self.reg[dst].wrapping_shl(self.reg[src] as u32), ebpf::RSH64_IMM => self.reg[dst] = self.reg[dst].wrapping_shr(insn.imm as u32), ebpf::RSH64_REG => self.reg[dst] = self.reg[dst].wrapping_shr(self.reg[src] as u32), - ebpf::NEG64 => self.reg[dst] = (self.reg[dst] as i64).wrapping_neg() as u64, + ebpf::NEG64 if self.executable.get_sbpf_version().enable_neg() => self.reg[dst] = (self.reg[dst] as i64).wrapping_neg() as u64, ebpf::MOD64_IMM => self.reg[dst] %= insn.imm as u64, ebpf::MOD64_REG => { if self.reg[src] == 0 { diff --git a/src/jit.rs b/src/jit.rs index 
6201364d..4e2f7429 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -468,7 +468,14 @@ impl<'a, V: Verifier, C: ContextObject> JitCompiler<'a, V, C> { self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 }, ebpf::SUB32_IMM => { - self.emit_sanitized_alu(OperandSize::S32, 0x29, 5, dst, insn.imm); + if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { + self.emit_ins(X86Instruction::alu(OperandSize::S32, 0xf7, 3, dst, 0, None)); + if insn.imm != 0 { + self.emit_sanitized_alu(OperandSize::S32, 0x01, 0, dst, insn.imm); + } + } else { + self.emit_sanitized_alu(OperandSize::S32, 0x29, 5, dst, insn.imm); + } self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 }, ebpf::SUB32_REG => { @@ -487,7 +494,7 @@ impl<'a, V: Verifier, C: ContextObject> JitCompiler<'a, V, C> { ebpf::LSH32_REG => self.emit_shift(OperandSize::S32, 4, src, dst, None), ebpf::RSH32_IMM => self.emit_shift(OperandSize::S32, 5, R11, dst, Some(insn.imm)), ebpf::RSH32_REG => self.emit_shift(OperandSize::S32, 5, src, dst, None), - ebpf::NEG32 => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0xf7, 3, dst, 0, None)), + ebpf::NEG32 if self.executable.get_sbpf_version().enable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0xf7, 3, dst, 0, None)), ebpf::XOR32_IMM => self.emit_sanitized_alu(OperandSize::S32, 0x31, 6, dst, insn.imm), ebpf::XOR32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x31, src, dst, 0, None)), ebpf::MOV32_IMM => { @@ -531,7 +538,16 @@ impl<'a, V: Verifier, C: ContextObject> JitCompiler<'a, V, C> { // BPF_ALU64 class ebpf::ADD64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, dst, insn.imm), ebpf::ADD64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, src, dst, 0, None)), - ebpf::SUB64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x29, 5, dst, insn.imm), + ebpf::SUB64_IMM => { + if 
self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, dst, 0, None)); + if insn.imm != 0 { + self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, dst, insn.imm); + } + } else { + self.emit_sanitized_alu(OperandSize::S64, 0x29, 5, dst, insn.imm); + } + } ebpf::SUB64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, src, dst, 0, None)), ebpf::MUL64_IMM | ebpf::DIV64_IMM | ebpf::SDIV64_IMM | ebpf::MOD64_IMM => self.emit_muldivmod(insn.opc, dst, dst, Some(insn.imm)), @@ -545,7 +561,7 @@ impl<'a, V: Verifier, C: ContextObject> JitCompiler<'a, V, C> { ebpf::LSH64_REG => self.emit_shift(OperandSize::S64, 4, src, dst, None), ebpf::RSH64_IMM => self.emit_shift(OperandSize::S64, 5, R11, dst, Some(insn.imm)), ebpf::RSH64_REG => self.emit_shift(OperandSize::S64, 5, src, dst, None), - ebpf::NEG64 => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, dst, 0, None)), + ebpf::NEG64 if self.executable.get_sbpf_version().enable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, dst, 0, None)), ebpf::XOR64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x31, 6, dst, insn.imm), ebpf::XOR64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x31, src, dst, 0, None)), ebpf::MOV64_IMM => { diff --git a/src/verifier.rs b/src/verifier.rs index 67aca944..6f455f35 100644 --- a/src/verifier.rs +++ b/src/verifier.rs @@ -288,7 +288,7 @@ impl Verifier for RequisiteVerifier { ebpf::LSH32_REG => {}, ebpf::RSH32_IMM => { check_imm_shift(&insn, insn_ptr, 32)?; }, ebpf::RSH32_REG => {}, - ebpf::NEG32 => {}, + ebpf::NEG32 if sbpf_version.enable_neg() => {}, ebpf::MOD32_IMM => { check_imm_nonzero(&insn, insn_ptr)?; }, ebpf::MOD32_REG => {}, ebpf::XOR32_IMM => {}, @@ -319,7 +319,7 @@ impl Verifier for RequisiteVerifier { ebpf::LSH64_REG => {}, ebpf::RSH64_IMM => { check_imm_shift(&insn, insn_ptr, 64)?; }, ebpf::RSH64_REG => {}, - ebpf::NEG64 => {}, + ebpf::NEG64 if 
sbpf_version.enable_neg() => {}, ebpf::MOD64_IMM => { check_imm_nonzero(&insn, insn_ptr)?; }, ebpf::MOD64_REG => {}, ebpf::XOR64_IMM => {}, diff --git a/tests/execution.rs b/tests/execution.rs index 59c0efbe..1b2d83d4 100644 --- a/tests/execution.rs +++ b/tests/execution.rs @@ -253,11 +253,16 @@ fn test_add32() { #[test] fn test_neg32() { + let config = Config { + enable_sbpf_v2: false, + ..Config::default() + }; test_interpreter_and_jit_asm!( " mov32 r0, 2 neg32 r0 exit", + config, [], (), TestContextObject::new(3), @@ -267,11 +272,16 @@ fn test_neg32() { #[test] fn test_neg64() { + let config = Config { + enable_sbpf_v2: false, + ..Config::default() + }; test_interpreter_and_jit_asm!( " mov32 r0, 2 neg r0 exit", + config, [], (), TestContextObject::new(3), @@ -293,10 +303,10 @@ fn test_alu32_arithmetic() { mov32 r7, 7 mov32 r8, 8 mov32 r9, 9 - add32 r0, 23 - add32 r0, r7 sub32 r0, 13 sub32 r0, r1 + add32 r0, 23 + add32 r0, r7 mul32 r0, 7 mul32 r0, r3 div32 r0, 2 @@ -305,7 +315,7 @@ fn test_alu32_arithmetic() { [], (), TestContextObject::new(19), - ProgramResult::Ok(0x2a), + ProgramResult::Ok(110), ); } @@ -323,10 +333,10 @@ fn test_alu64_arithmetic() { mov r7, 7 mov r8, 8 mov r9, 9 - add r0, 23 - add r0, r7 sub r0, 13 sub r0, r1 + add r0, 23 + add r0, r7 mul r0, 7 mul r0, r3 div r0, 2 @@ -335,7 +345,7 @@ fn test_alu64_arithmetic() { [], (), TestContextObject::new(19), - ProgramResult::Ok(0x2a), + ProgramResult::Ok(110), ); } @@ -542,7 +552,7 @@ fn test_rhs32_imm() { test_interpreter_and_jit_asm!( " xor r0, r0 - sub r0, 1 + add r0, -1 rsh32 r0, 8 exit", [], @@ -3366,7 +3376,7 @@ fn test_symbol_relocation() { test_interpreter_and_jit_asm!( " mov64 r1, r10 - sub64 r1, 0x1 + add64 r1, -0x1 mov64 r2, 0x1 syscall bpf_syscall_string mov64 r0, 0x0