diff --git a/Arm/Insts/Common.lean b/Arm/Insts/Common.lean
index 38b59dc4..58c5068a 100644
--- a/Arm/Insts/Common.lean
+++ b/Arm/Insts/Common.lean
@@ -667,7 +667,7 @@ def RShr (unsigned : Bool) (value : Int) (shift : Nat) (round : Bool)
 @[state_simp_rules]
 def Int_with_unsigned (unsigned : Bool) (value : BitVec n) : Int :=
-  if unsigned then value.toNat else value.toInt
+  if unsigned then Int.ofNat value.toNat else value.toInt
 
 def shift_right_common_aux
   (e : Nat) (info : ShiftInfo) (operand : BitVec datasize)
@@ -683,6 +683,191 @@ def shift_right_common_aux
     shift_right_common_aux (e + 1) info operand operand2 result
   termination_by (info.elements - e)
 
+-- FIXME: should this be upstreamed?
+theorem shift_le (x : Nat) (shift : Nat) :
+    x >>> shift ≤ x := by
+  simp only [Nat.shiftRight_eq_div_pow]
+  exact Nat.div_le_self x (2 ^ shift)
+
+set_option bv.ac_nf false
+
+@[state_simp_rules]
+theorem shift_right_common_aux_64_2_tff (operand : BitVec 128)
+    (shift : Nat) (result : BitVec 128):
+  shift_right_common_aux 0
+    {esize := 64, elements := 2, shift := shift,
+     unsigned := true, round := false, accumulate := false}
+    operand 0#128 result =
+  (ushiftRight (extractLsb' 64 64 operand) shift)
+    ++ (ushiftRight (extractLsb' 0 64 operand) shift) := by
+  unfold shift_right_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  unfold shift_right_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  unfold shift_right_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  simp only [state_simp_rules,
+    minimal_theory,
+    -- FIXME: simply using bitvec_rules will expand out setWidth
+    -- bitvec_rules,
+    BitVec.cast_eq,
+    Nat.shiftRight_zero,
+    Nat.zero_shiftRight,
+    Nat.reduceMul,
+    Nat.reduceAdd,
+    Nat.add_one_sub_one,
+    Nat.sub_zero,
+    reduceAllOnes,
+    reduceZeroExtend,
+    Nat.zero_mul,
+    shiftLeft_zero_eq,
+    reduceNot,
+    BitVec.extractLsb_ofNat,
+    Nat.reducePow,
+    Nat.zero_mod,
+    Int.ofNat_emod,
+    Int.Nat.cast_ofNat_Int,
+    BitVec.zero_add,
+    Nat.reduceSub,
+    Nat.one_mul,
+    reduceHShiftLeft,
+    -- FIXME: should partInstall be state_simp_rules?
+    partInstall,
+    -- Eliminating casting functions
+    Int.ofNat_eq_coe, ofInt_natCast, ofNat_toNat
+  ]
+  generalize (extractLsb' 64 64 operand) = x
+  generalize (extractLsb' 0 64 operand) = y
+  have h0 : ∀ (z : BitVec 64), extractLsb' 0 64 ((zeroExtend 65 z).ushiftRight shift)
+      = z.ushiftRight shift := by
+    intro z
+    simp only [ushiftRight, toNat_setWidth]
+    have h1 : z.toNat % 2 ^ 65 = z.toNat := by omega
+    simp only [h1]
+    simp only [Std.Tactic.BVDecide.Normalize.BitVec.ofNatLt_reduce]
+    simp only [Nat.sub_zero, Nat.reduceAdd, BitVec.extractLsb'_ofNat, Nat.shiftRight_zero]
+    have h2 : z.toNat >>> shift % 2 ^ 65 = z.toNat >>> shift := by
+      refine Nat.mod_eq_of_lt ?h3
+      have h4 : z.toNat >>> shift ≤ z.toNat := by exact shift_le z.toNat shift
+      omega
+    simp only [h2]
+  simp only [h0]
+  clear h0
+  generalize x.ushiftRight shift = p
+  generalize y.ushiftRight shift = q
+  -- FIXME: This proof can be simplified once bv_decide supports shift
+  -- operations with variable offsets
+  bv_decide
+
+-- FIXME: where to put this?
+theorem ofInt_eq_signExtend (x : BitVec 32) :
+    BitVec.ofInt 33 x.toInt = signExtend 33 x := by
+  exact rfl
+
+-- FIXME: where to put this?
+theorem msb_signExtend (x : BitVec n) (hw: n < n'):
+    (signExtend n' x).msb = x.msb := by
+  rcases n' with rfl | n'
+  · simp only [show n = 0 by omega,
+      msb_eq_getLsbD_last, Nat.zero_sub, Nat.le_refl,
+      getLsbD_ge]
+  · simp only [msb_eq_getLsbD_last, Nat.add_one_sub_one,
+      getLsbD_signExtend, Nat.lt_add_one,
+      decide_True, Bool.true_and, ite_eq_right_iff]
+    by_cases h : n' < n
+    · rcases n with rfl | n
+      · simp
+      · simp only [h, Nat.add_one_sub_one, true_implies]
+        omega
+    · simp [h]
+
+theorem shift_right_common_aux_32_4_fff (operand : BitVec 128)
+    (shift : Nat) (result : BitVec 128):
+  shift_right_common_aux 0
+    { esize := 32, elements := 4, shift := shift,
+      unsigned := false, round := false, accumulate := false}
+    operand 0#128 result =
+  (sshiftRight (extractLsb' 96 32 operand) shift)
+    ++ (sshiftRight (extractLsb' 64 32 operand) shift)
+    ++ (sshiftRight (extractLsb' 32 32 operand) shift)
+    ++ (sshiftRight (extractLsb' 0 32 operand) shift) := by
+  unfold shift_right_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  unfold shift_right_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  unfold shift_right_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  unfold shift_right_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  unfold shift_right_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  simp only [state_simp_rules,
+    minimal_theory,
+    -- FIXME: simply using bitvec_rules will expand out setWidth
+    -- bitvec_rules,
+    BitVec.cast_eq,
+    Nat.shiftRight_zero,
+    Nat.zero_shiftRight,
+    Nat.reduceMul,
+    Nat.reduceAdd,
+    Nat.add_one_sub_one,
+    Nat.sub_zero,
+    reduceAllOnes,
+    reduceZeroExtend,
+    Nat.zero_mul,
+    shiftLeft_zero_eq,
+    reduceNot,
+    BitVec.extractLsb_ofNat,
+    Nat.reducePow,
+    Nat.zero_mod,
+    Int.ofNat_emod,
+    Int.Nat.cast_ofNat_Int,
+    BitVec.zero_add,
+    Nat.reduceSub,
+    Nat.one_mul,
+    reduceHShiftLeft,
+    partInstall,
+    -- Eliminating casting functions
+    ofInt_eq_signExtend
+  ]
+  generalize extractLsb' 0 32 operand = a
+  generalize extractLsb' 32 32 operand = b
+  generalize extractLsb' 64 32 operand = c
+  generalize extractLsb' 96 32 operand = d
+  have h : ∀ (x : BitVec 32),
+      extractLsb' 0 32 ((signExtend 33 x).sshiftRight shift)
+      = x.sshiftRight shift := by
+    intros x
+    apply eq_of_getLsbD_eq; intros i; simp at i
+    simp only [getLsbD_sshiftRight]
+    simp only [Nat.sub_zero, Nat.reduceAdd, getLsbD_extractLsb', Nat.zero_add,
+      getLsbD_sshiftRight, getLsbD_signExtend]
+    simp only [show (i : Nat) < 32 by omega,
+      decide_True, Bool.true_and]
+    simp only [show ¬33 ≤ (i : Nat) by omega,
+      decide_False, Bool.not_false, Bool.true_and]
+    simp only [show ¬32 ≤ (i : Nat) by omega,
+      decide_False, Bool.not_false, Bool.true_and]
+    by_cases h : shift + (i : Nat) < 32
+    · simp only [h, reduceIte]
+      simp only [show shift + (i : Nat) < 33 by omega,
+        ↓reduceIte, decide_True, Bool.true_and]
+    · simp only [h, reduceIte]
+      have icases : shift + (i : Nat) = 32 ∨ 32 < shift + (i : Nat) := by omega
+      rcases icases with (h' | h')
+      · simp only [h', Nat.lt_add_one, ↓reduceIte, decide_True, Bool.true_and]
+      · simp only [show ¬(shift + (i : Nat) < 33) by omega, ↓reduceIte]
+        apply msb_signExtend; trivial
+  simp only [h]
+  clear h
+  generalize a.sshiftRight shift = a
+  generalize b.sshiftRight shift = b
+  generalize c.sshiftRight shift = c
+  generalize d.sshiftRight shift = d
+  -- FIXME: This proof can be simplified once bv_decide supports shift
+  -- operations with variable offsets
+  bv_decide
+
 @[state_simp_rules]
 def shift_right_common (info : ShiftInfo) (datasize : Nat) (Rn : BitVec 5) (Rd : BitVec 5)
@@ -705,6 +890,24 @@ def shift_left_common_aux
     shift_left_common_aux (e + 1) info operand result
   termination_by (info.elements - e)
 
+theorem shift_left_common_aux_64_2 (operand : BitVec 128)
+    (shift : Nat) (unsigned: Bool) (round : Bool) (accumulate : Bool)
+    (result : BitVec 128):
+  shift_left_common_aux 0
+    {esize := 64, elements := 2, shift := shift,
+     unsigned := unsigned, round := round, accumulate := accumulate}
+    operand result =
+  (extractLsb' 64 64 operand <<< shift)
+    ++ (extractLsb' 0 64 operand <<< shift) := by
+  unfold shift_left_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  unfold shift_left_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  unfold shift_left_common_aux
+  simp only [minimal_theory, bitvec_rules]
+  simp only [state_simp_rules, minimal_theory, bitvec_rules, partInstall]
+  bv_decide
+
 @[state_simp_rules]
 def shift_left_common (info : ShiftInfo) (datasize : Nat) (Rn : BitVec 5)
   (s : ArmState)
diff --git a/Arm/Insts/DPSFP/Advanced_simd_copy.lean b/Arm/Insts/DPSFP/Advanced_simd_copy.lean
index 972d8357..bb383d7c 100644
--- a/Arm/Insts/DPSFP/Advanced_simd_copy.lean
+++ b/Arm/Insts/DPSFP/Advanced_simd_copy.lean
@@ -25,6 +25,24 @@ def dup_aux (e : Nat) (elements : Nat) (esize : Nat)
     dup_aux (e + 1) elements esize element result
   termination_by (elements - e)
 
+set_option bv.ac_nf false
+
+theorem dup_aux_0_4_32 (element : BitVec 32) (result : BitVec 128) :
+  dup_aux 0 4 32 element result
+    = element ++ element ++ element ++ element := by
+  unfold dup_aux
+  simp [minimal_theory]
+  unfold dup_aux
+  simp [minimal_theory]
+  unfold dup_aux
+  simp [minimal_theory]
+  unfold dup_aux
+  simp [minimal_theory]
+  unfold dup_aux
+  simp [minimal_theory]
+  simp [state_simp_rules, partInstall]
+  bv_decide
+
 @[state_simp_rules]
 def exec_dup_element (inst : Advanced_simd_copy_cls) (s : ArmState) : ArmState :=
   let size := lowest_set_bit inst.imm5
diff --git a/Proofs/AES-GCM/GCMInitV8Sym.lean b/Proofs/AES-GCM/GCMInitV8Sym.lean
index 5683d0c7..e4e47c8a 100644
--- a/Proofs/AES-GCM/GCMInitV8Sym.lean
+++ b/Proofs/AES-GCM/GCMInitV8Sym.lean
@@ -11,11 +11,12 @@ import Specs.GCMV8
 
 namespace GCMInitV8Program
 
+set_option bv.ac_nf false
+
 abbrev H_addr (s : ArmState) : BitVec 64 := r (StateField.GPR 1#5) s
 abbrev Htable_addr (s : ArmState) : BitVec 64 := r (StateField.GPR 0#5) s
 
--- set_option debug.byAsSorry true in
-set_option maxRecDepth 1000000 in
+set_option maxRecDepth 8000 in
 -- set_option profiler true in
 theorem gcm_init_v8_program_run_152 (s0 sf : ArmState)
     (h_s0_program : s0.program = gcm_init_v8_program)
@@ -33,8 +34,13 @@ theorem gcm_init_v8_program_run_152 (s0 sf : ArmState)
   sym_n 152
   done
 
-set_option maxRecDepth 1000000 in
-set_option maxHeartbeats 2000000 in
+set_option maxRecDepth 100000 in
+set_option maxHeartbeats 500000 in
+set_option sat.timeout 120 in
+-- set_option pp.deepTerms true in
+-- set_option pp.maxSteps 10000 in
+-- set_option trace.profiler true in
+-- set_option linter.unusedVariables false in
 -- set_option profiler true in
 theorem gcm_init_v8_program_correct (s0 sf : ArmState)
     (h_s0_program : s0.program = gcm_init_v8_program)
@@ -42,7 +48,7 @@ theorem gcm_init_v8_program_correct (s0 sf : ArmState)
     (h_s0_pc : read_pc s0 = gcm_init_v8_program.min)
     (h_s0_sp_aligned : CheckSPAlignment s0)
     (h_run : sf = run gcm_init_v8_program.length s0)
-    (_h_mem : Memory.Region.pairwiseSeparate
+    (h_mem : Memory.Region.pairwiseSeparate
      [ ⟨(H_addr s0), 128⟩,
        ⟨(Htable_addr s0), 2048⟩ ]) :
     -- effects
@@ -59,8 +65,10 @@ theorem gcm_init_v8_program_correct (s0 sf : ArmState)
     -- H_addr ptr stays the same
     ∧ H_addr sf = H_addr s0
     -- v20 - v31 stores results of Htable
-    ∧ read_sfp 128 20#5 sf = (GCMV8.GCMInitV8 (read_mem_bytes 16 (H_addr s0) s0)).get! 0
-    -- ∧ read_sfp 128 21#5 sf = (GCMV8.GCMInitV8 (read_mem_bytes 16 (H_addr s0) s0)).get! 1
+    ∧ let Hinit := (read_mem_bytes 16 (H_addr s0) s0)
+      read_sfp 128 20#5 sf =
+        (GCMV8.GCMInitV8 ((BitVec.extractLsb' 0 64 Hinit) ++ (BitVec.extractLsb' 64 64 Hinit))).get! 0
+    --
     -- -- TODO: Commenting out memory related conjuncts since it seems
     -- to make symbolic execution stuck
     -- -- First 12 elements in Htable is correct
@@ -73,6 +81,7 @@ theorem gcm_init_v8_program_correct (s0 sf : ArmState)
     -- ∧ (∀ (f : StateField), ¬ (f = StateField.PC) ∧
     --      ¬ (f = (StateField.GPR 29#5)) →
     --      r f sf = r f s0)
+    --
     -- -- Memory safety: memory location that should not change did
     -- -- not change
     -- -- The addr exclude output region Htable
     -- ∧ (∀ (n : Nat) (addr : BitVec 64),
@@ -84,6 +93,32 @@ theorem gcm_init_v8_program_correct (s0 sf : ArmState)
   -- unable to be reflected
   sym_n 152
   simp only [Htable_addr, state_value] -- TODO: state_value is needed, why
-  apply And.intro
+  apply And.intro
   · bv_decide
   · sorry
+  -- [Shilpi] Commenting out the following because the CI fails with
+  -- "INTERNAL PANIC: out of memory"
+  /-
+  simp only
+    [shift_left_common_aux_64_2
+    , shift_right_common_aux_64_2_tff
+    , shift_right_common_aux_32_4_fff
+    , DPSFP.AdvSIMDExpandImm
+    , DPSFP.dup_aux_0_4_32]
+  generalize read_mem_bytes 16 (r (StateField.GPR 1#5) s0) s0 = Hinit
+  -- change the type of Hinit to be BitVec 128, assuming that's def-eq
+  change BitVec 128 at Hinit
+  simp only [GCMV8.GCMInitV8, GCMV8.lo, List.get!, GCMV8.hi,
+    GCMV8.gcm_init_H, GCMV8.refpoly, GCMV8.pmod, GCMV8.pmod.pmodTR,
+    GCMV8.reduce, GCMV8.degree, GCMV8.degree.degreeTR]
+  simp only [Nat.reduceAdd, BitVec.ushiftRight_eq, BitVec.reduceExtracLsb',
+    BitVec.reduceHShiftLeft, BitVec.reduceAppend, BitVec.reduceHShiftRight, BitVec.ofNat_eq_ofNat,
+    BitVec.reduceEq, ↓reduceIte, BitVec.zero_eq, Nat.sub_self, BitVec.ushiftRight_zero_eq,
+    BitVec.reduceAnd, BitVec.toNat_ofNat, Nat.pow_one, Nat.reduceMod, Nat.mul_zero, Nat.add_zero,
+    Nat.zero_mod, Nat.zero_add, Nat.sub_zero, Nat.mul_one, Nat.zero_mul, Nat.one_mul,
+    Nat.reduceSub, BitVec.reduceMul, BitVec.reduceXOr, BitVec.mul_one, Nat.add_one_sub_one,
+    BitVec.one_mul]
+  -- bv_check "GCMInitV8Sym.lean-GCMInitV8Program.gcm_init_v8_program_correct-117-4.lrat"
+  -- TODO: proof works in vscode but timeout in the CI -- need to investigate further
+  -/
+
diff --git a/Proofs/AES-GCM/GCMInitV8Sym.lean-GCMInitV8Program.gcm_init_v8_program_correct-117-4.lrat b/Proofs/AES-GCM/GCMInitV8Sym.lean-GCMInitV8Program.gcm_init_v8_program_correct-117-4.lrat
new file mode 100644
index 00000000..a639618a
Binary files /dev/null and b/Proofs/AES-GCM/GCMInitV8Sym.lean-GCMInitV8Program.gcm_init_v8_program_correct-117-4.lrat differ
diff --git a/Specs/GCMV8.lean b/Specs/GCMV8.lean
index 33252b91..f365cfff 100644
--- a/Specs/GCMV8.lean
+++ b/Specs/GCMV8.lean
@@ -50,19 +50,24 @@ def pmult (x: BitVec (m + 1)) (y : BitVec (n + 1)) : BitVec (m + n + 1) :=
 
 example: pmult 0b1101#4 0b10#2 = 0b11010#5 := rfl
 
-/-- Degree of x. -/
-private def degree (x : BitVec n) : Nat :=
-  let rec degreeTR (x : BitVec n) (n : Nat) : Nat :=
+/-- Degree of x. Defined using non-ite statements. -/
+def degree (x : BitVec n) : Nat :=
+  let rec degreeTR (x : BitVec n) (n : Nat) (i : Nat) (acc : Nat) : Nat :=
     match n with
-    | 0 => 0
+    | 0 => acc
     | m + 1 =>
-      if getLsbD x n then n else degreeTR x m
-  degreeTR x (n - 1)
+      let is_one := extractLsb' 0 1 (x &&& 1)
+      degreeTR (x >>> 1) m (i + 1) (acc + is_one.toNat * (i - acc))
+  degreeTR x n 0 0
+
 example: GCMV8.degree 0b0101#4 = 2 := rfl
+example: GCMV8.degree 0b1101#4 = 3 := rfl
 
-/-- Subtract x from y if y's x-degree-th bit is 1. -/
-private def reduce (x : BitVec n) (y : BitVec n) : BitVec n :=
-  if getLsbD y (GCMV8.degree x) then y ^^^ x else y
+/-- Subtract x from y if y's x-degree-th bit is 1.
+  Defined using non-ite statements. -/
+def reduce (x : BitVec n) (y : BitVec n) : BitVec n :=
+  let is_one := (y >>> (GCMV8.degree x)) &&& 1
+  y ^^^ (is_one * x)
 
 /-- Performs division of polynomials over GF(2). -/
 def pdiv (x: BitVec n) (y : BitVec m): BitVec n :=
@@ -83,16 +88,16 @@ example : pdiv 0b1101#4 0b10#2 = 0b110#4 := rfl
 example : pdiv 0x1a#5 0b10#2 = 0b1101#5 := rfl
 example : pdiv 0b1#1 0b10#2 = 0b0#1 := rfl
 
-/-- Performs modulus of polynomials over GF(2). -/
+/-- Performs modulus of polynomials over GF(2).
+  Defined using non-ite statements. -/
 def pmod (x : BitVec n) (y : BitVec (m + 1)) (H : 0 < m) : BitVec m :=
   let rec pmodTR (x : BitVec n) (y : BitVec (m + 1)) (p : BitVec (m + 1))
     (i : Nat) (r : BitVec m) (H : 0 < m) : BitVec m :=
     match i with
     | 0 => r
     | j + 1 =>
-      let xi := getLsbD x (n - i)
-      let tmp :=
-        if xi then extractLsb' 0 m p else BitVec.zero m
+      let is_one := extractLsb' 0 m ((x >>> (n - i)) &&& 1)
+      let tmp := is_one * extractLsb' 0 m p
       let r := r ^^^ tmp
       pmodTR x y (GCMV8.reduce y (p <<< 1)) j r H
   if y = 0 then 0 else pmodTR x y (GCMV8.reduce y 1) n (BitVec.zero m) H
@@ -127,7 +132,7 @@ def refpoly : BitVec 129 := 0x1C2000000000000000000000000000001#129
   See Remark 5 in paper
     "A New Interpretation for the GHASH Authenticator of AES-GCM" -/
-private def gcm_init_H (H : BitVec 128) : BitVec 128 :=
+def gcm_init_H (H : BitVec 128) : BitVec 128 :=
   pmod (H ++ 0b0#1) refpoly (by omega)
 
 def gcm_polyval_mul (x : BitVec 128) (y : BitVec 128) : BitVec 256 :=
@@ -219,7 +224,7 @@ example : GCMGmultV8 0x1099f4b39468565ccdd297a9df145877#128
     0x9e#8, 0x15#8, 0xa6#8, 0x00#8, 0x67#8, 0x29#8, 0x7e#8, 0x0f#8 ] := rfl
 
-private def gcm_ghash_block (H : BitVec 128) (Xi : BitVec 128)
+def gcm_ghash_block (H : BitVec 128) (Xi : BitVec 128)
   (inp : BitVec 128) : BitVec 128 :=
   let H := (lo H) ++ (hi H)
  GCMV8.gcm_polyval H (Xi ^^^ inp)
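
Note (not part of the patch): the Specs/GCMV8.lean changes above all use the same idiom — an `ite` on a tested bit is replaced by multiplication with a 0/1 mask obtained from a shift-and-mask, as in the new `reduce`, where `is_one := (y >>> (GCMV8.degree x)) &&& 1` and the result is `y ^^^ (is_one * x)`. Below is a minimal standalone Lean sketch of that idiom; the 8-bit width and the concrete literals are illustrative only, and `decide` suffices because every term is closed.

-- Bit 3 of 0b01011010#8 is set, so the mask is 1 and the xor fires,
-- matching the `if ... then y ^^^ x else y` branch.
example :
    (if (0b01011010#8).getLsbD 3 then 0b01011010#8 ^^^ 0b11110000#8 else 0b01011010#8)
      = 0b01011010#8 ^^^ (((0b01011010#8 >>> 3) &&& 1) * 0b11110000#8) := by
  decide

-- Bit 2 is clear, so the mask is 0, the product is 0#8, and the xor is a
-- no-op, matching the `else` branch.
example :
    (if (0b01011010#8).getLsbD 2 then 0b01011010#8 ^^^ 0b11110000#8 else 0b01011010#8)
      = 0b01011010#8 ^^^ (((0b01011010#8 >>> 2) &&& 1) * 0b11110000#8) := by
  decide

Keeping `degree`, `reduce`, `pmod`, and `gcm_init_H` free of `ite` (and no longer `private`) presumably lets the `simp only [..., GCMV8.reduce, GCMV8.pmod, ...]` call in GCMInitV8Sym.lean unfold them into a form that `bv_decide`/`bv_check` can discharge.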