Minor edits to SHA512 sym (#203)
### Description:

Flesh out the loop postcondition; other misc. cleanup 

### Testing:

What tests have been run? Did `make all` succeed for your changes? Was
conformance testing successful on an AArch64 machine?

Yes

### License:

By submitting this pull request, I confirm that my contribution is
made under the terms of the Apache 2.0 license.
shigoel authored Sep 27, 2024
1 parent 06bc3ec commit c097f65
Showing 4 changed files with 126 additions and 175 deletions.
20 changes: 16 additions & 4 deletions Arm/Insts/Common.lean
@@ -41,6 +41,12 @@ partial def GPRIndex.rand (lo := 0) (hi := 31) :

----------------------------------------------------------------------

/--
Integer addition with carry input, returning result and NZCV flags.
Ref.:
https://developer.arm.com/documentation/ddi0602/2024-06/Shared-Pseudocode/shared-functions-integer?lang=en#impl-shared.AddWithCarry.3
-/
def AddWithCarry (x : BitVec n) (y : BitVec n) (carry_in : BitVec 1) :
(BitVec n × PState) :=
let carry_in_ext := zeroExtend (n + 1) carry_in
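As a quick sanity check (illustrative only, not part of this commit): the sum wraps modulo 2^n, with the flags returned in the `PState` component. A minimal sketch, assuming `decide` can evaluate this computable definition:

```lean
-- 0xFF + 0x01 with no carry-in wraps to 0 in 8 bits; the NZCV flags
-- (here, C = 1 and Z = 1) are in the second component of the result.
example : (AddWithCarry 0xFF#8 0x01#8 0#1).fst = 0#8 := by decide
```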
@@ -90,6 +96,12 @@ theorem zeroExtend_eq_of_AddWithCarry :
(AddWithCarry x y carry_in).fst := by
simp only [zeroExtend_eq]

/--
Return `true` iff `cond` currently holds.
Ref.:
https://developer.arm.com/documentation/ddi0602/2024-06/Shared-Pseudocode/shared-functions-system?lang=en#impl-shared.ConditionHolds.1
-/
def ConditionHolds (cond : BitVec 4) (s : ArmState) : Bool :=
open PFlag in
let N := read_flag N s
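For orientation, the table below summarizes the condition encoding that the cited pseudocode implements; it is reproduced from the ARM manual for reference and is not part of this change. The low bit of `cond` negates the test, except for `0b1111`, which also means "always":

```lean
-- cond<3:1>  mnemonics  test on NZCV
--   000      EQ / NE    Z == 1
--   001      CS / CC    C == 1
--   010      MI / PL    N == 1
--   011      VS / VC    V == 1
--   100      HI / LS    C == 1 && Z == 0
--   101      GE / LT    N == V
--   110      GT / LE    N == V && Z == 0
--   111      AL         true
```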
@@ -171,7 +183,7 @@ def Aligned (x : BitVec n) (a : Nat) : Prop :=

/-- We need to show that the `Aligned` predicate is `Decidable`. -/
instance : Decidable (Aligned x a) := by
cases a <;> simp [Aligned] <;> infer_instance
cases a <;> simp only [Aligned] <;> infer_instance
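Because the instance computes, concrete alignment facts close by `decide`; an illustrative example (not from this diff):

```lean
-- The low four bits of 0x1230 are zero, i.e., the value is 16-byte
-- aligned in the sense of `Aligned · 4`. Illustration only.
example : Aligned 0x1230#64 4 := by decide
```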

theorem Aligned_BitVecSub_64_4 {x : BitVec 64} {y : BitVec 64}
(x_aligned : Aligned x 4)
@@ -184,15 +196,15 @@ theorem Aligned_BitVecAdd_64_4 {x : BitVec 64} {y : BitVec 64}
(x_aligned : Aligned x 4)
(y_aligned : Aligned y 4)
: Aligned (x + y) 4 := by
simp_all [Aligned]
simp_all only [Aligned, Nat.sub_zero, zero_eq]
bv_decide

theorem Aligned_AddWithCarry_64_4 (x : BitVec 64) (y : BitVec 64) (carry_in : BitVec 1)
(x_aligned : Aligned x 4)
(y_carry_in_aligned : Aligned (BitVec.add (extractLsb 3 0 y) (zeroExtend 4 carry_in)) 4)
: Aligned (AddWithCarry x y carry_in).fst 4 := by
unfold AddWithCarry Aligned at *
simp_all
simp_all only [Nat.sub_zero, zero_eq, add_eq]
bv_decide

/-- Check correct stack pointer (SP) alignment for AArch64 state; returns
@@ -576,7 +588,7 @@ example : rev_elems 8 4 (rev_elems 8 4 0xAB#8 (by decide) (by decide))

theorem rev_elems_base :
rev_elems esize esize x h₀ h₁ = x := by
unfold rev_elems; simp; done
unfold rev_elems; simp only [Nat.le_refl, ↓reduceDIte]; done
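A concrete evaluation may help alongside the involution example above; this sketch (not in this commit) uses `native_decide` on the assumption that kernel reduction of the recursive `rev_elems` would be slow:

```lean
-- Reversing the two 4-bit elements of 0xAB gives 0xBA. Illustration only.
example : rev_elems 8 4 0xAB#8 (by decide) (by decide) = 0xBA#8 := by
  native_decide
```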

/-- Divide a bv of width `datasize` into containers, each of size
`container_size`, and within a container, reverse the order of `esize`-bit
2 changes: 1 addition & 1 deletion Arm/Insts/DPSFP/Advanced_simd_two_reg_misc.lean
@@ -3,7 +3,7 @@ Copyright (c) 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author(s): Shilpi Goel
-/
-- REV64
-- REV64/32/16

import Arm.Decode
import Arm.State
125 changes: 65 additions & 60 deletions Proofs/SHA512/SHA512Loop.lean
@@ -8,59 +8,60 @@ open BitVec

/-! ## Reasoning about the SHA512 loop
We prove that at the end of an iteration of the SHA512 loop, `sha512_loop_post`
We prove that at the end of an iteration of the SHA512 loop, `loop_post`
is satisfied.
-/

namespace SHA512

-- (TODO @shilpi) Write the loop invariant.
structure sha512_loop_post
  (pc nblocks sp ctx_base input_base : BitVec 64)
  (si : ArmState) : Prop where
  h_program : si.program = program
  h_pc : r .PC si = pc
  h_err : r .ERR si = .None
  h_sp_aligned : CheckSPAlignment si
  -- No blocks must be left unhashed by the time the loop runs to completion.
  h_num_blocks : num_blocks si = 0#64
  h_sp : stack_ptr si = sp - 16#64
  h_ctx_base : ctx_addr si = ctx_base
  h_input_base : input_addr si = input_base
  -- (TODO @shilpi) The ctx must contain the hash.
  -- h_ctx : si[ctx_base, 64] = SHA2.h0_512.toBitVec
  h_ktbl : si[ktbl_addr, (SHA2.k_512.length * 8)] = BitVec.flatten SHA2.k_512
  h_mem_sep : Memory.Region.pairwiseSeparate
    [(sp - 16#64, 16),
     (ctx_base, 64),
     (input_base, (nblocks.toNat * 128)),
     (ktbl_addr, (SHA2.k_512.length * 8))]

/--
Vector instruction `REV64` that reverses the order of 16-bit elements in each
64-bit slice of the 128-bit input.
Ref.:
https://developer.arm.com/documentation/ddi0602/2024-06/SIMD-FP-Instructions/REV64--Reverse-elements-in-64-bit-doublew
-/
def vrev64_16 (x : BitVec 128) : BitVec 128 :=
  rev_vector 128 64 16 x
    (by decide) (by decide) (by decide)
    (by decide) (by decide)
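To pin down the lane order, an illustrative evaluation (not part of this diff; `native_decide` is assumed acceptable since `rev_vector` is recursive):

```lean
-- The four 16-bit lanes of each 64-bit half are reversed. Illustration only.
example : vrev64_16 0x00010002000300040005000600070008#128 =
    0x00040003000200010008000700060005#128 := by
  native_decide
```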

theorem sha512_loop_post.iff_def :
(sha512_loop_post pc nblocks sp ctx_base input_base si) ↔
(si.program = program ∧
r .PC si = pc ∧
r .ERR si = .None ∧
CheckSPAlignment si ∧
num_blocks si = 0#64 ∧
stack_ptr si = sp - 16#64 ∧
ctx_addr si = ctx_base ∧
input_addr si = input_base ∧
-- si[ctx_base, 64] = SHA2.h0_512.toBitVec ∧
si[ktbl_addr, (SHA2.k_512.length * 8)] = BitVec.flatten SHA2.k_512 ∧
Memory.Region.pairwiseSeparate
[(sp - 16#64, 16),
(ctx_base, 64),
(input_base, (nblocks.toNat * 128)),
(ktbl_addr, (SHA2.k_512.length * 8))]) := by
constructor
· intro h
obtain ⟨⟩ := h
simp only [*, minimal_theory]
· intro h
constructor <;> simp only [*, minimal_theory]
done
/--
Loop postcondition when exactly one block needs to be hashed.
-/
def loop_post (PC N SP CtxBase InputBase : BitVec 64)
(si : ArmState) : Prop :=
-- TODO: Write a better spec. function.
-- let spec_digest := 0#512
-- let impl_digest :=
-- r (.SFP 3#5) si ++ r (.SFP 2#5) si ++
-- r (.SFP 1#5) si ++ r (.SFP 0#5) si
-- All blocks must be hashed.
num_blocks si = 0 ∧
si.program = program ∧
r .PC si = PC ∧
r .ERR si = .None ∧
CheckSPAlignment si ∧
ctx_addr si = CtxBase ∧
stack_ptr si = SP - 16#64 ∧
si[KtblAddr, (SHA2.k_512.length * 8)] = BitVec.flatten SHA2.k_512 ∧
Memory.Region.pairwiseSeparate
[(SP - 16#64, 16),
(CtxBase, 64),
(InputBase, (N.toNat * 128)),
(KtblAddr, (SHA2.k_512.length * 8))] ∧
r (.GPR 3#5) si = KtblAddr ∧
input_addr si = InputBase + (N * 128#64) ∧
-- Registers contain the last processed input block.
r (.SFP 16#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 0)), 16]) ∧
r (.SFP 17#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 1)), 16]) ∧
r (.SFP 18#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 2)), 16]) ∧
r (.SFP 19#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 3)), 16]) ∧
r (.SFP 20#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 4)), 16]) ∧
r (.SFP 21#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 5)), 16]) ∧
r (.SFP 22#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 6)), 16]) ∧
r (.SFP 23#5) si = vrev64_16 (si[input_addr si - (128#64 - (16#64 * 7)), 16])
-- spec_digest = impl_digest -- TODO
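To see what the register clauses say concretely, fix `N = 1`: then `input_addr si = InputBase + 128#64`, and each address `input_addr si - (128#64 - (16#64 * i))` collapses to `InputBase + 16#64 * i`. A sketch of one instance (not from this commit), discharged with `bv_decide` as in `Arm/Insts/Common.lean`:

```lean
-- For i = 2 (register q18): (InputBase + 128) - (128 - 32) = InputBase + 32.
example (InputBase : BitVec 64) :
    (InputBase + 128#64) - (128#64 - (16#64 * 2)) = InputBase + 32#64 := by
  bv_decide
```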

/- TODO: Symbolically simulate (program.length - 16 - 3) = 485 instructions
here. We elide the 16 instructions before the loop and 3 instructions after it.
@@ -77,34 +78,38 @@ set_option maxHeartbeats 0 in
set_option maxRecDepth 8000 in
theorem sha512_block_armv8_loop_1block (si sf : ArmState)
(h_N : N = 1#64)
(h_si_prelude : sha512_prelude 0x126500#64 N SP CtxBase InputBase si)
(h_si_prelude : SHA512.prelude 0x126500#64 N SP CtxBase InputBase si)
-- TODO: Ideally, nsteps ought to be 485 to be able to simulate the loop to
-- completion.
(h_steps : nsteps = 20)
(h_steps : nsteps = 200)
(h_run : sf = run nsteps si) :
-- (FIXME) PC should be 0x126c94#64, i.e., we are poised to execute the first
-- instruction following the loop. For now, we stop early to remain in sync
-- with the number of steps we simulate.
sha512_loop_post (0x126500#64 + nsteps*4)
N SP CtxBase InputBase sf := by
loop_post (0x126500#64 + nsteps*4) N SP CtxBase InputBase sf := by
-- Prelude
subst h_N h_steps
obtain ⟨h_si_program, h_si_pc, h_si_err, h_si_sp_aligned,
h_si_num_blocks, h_si_sp, h_si_ctx_base,
h_si_input_base, h_si_ctx, h_si_ktbl, h_si_separate⟩ := h_si_prelude
simp only [num_blocks, ctx_addr, stack_ptr, input_addr] at *
simp only [sha512_loop_post.iff_def]
simp only [loop_post]
-- Symbolic Simulation
sym_n 20
/-
TODO @alex: The following aggregation fails with
"simp failed, maximum number of steps exceeded"
-/
-- sym_n 200
-- Epilogue
-- cse (config := { processHyps := .allHyps })
simp (config := {ground := true}) only
[fst_AddWithCarry_eq_sub_neg,
ConditionHolds,
state_simp_rules,
bitvec_rules, minimal_theory]
sym_aggregate
assumption
done
-- simp (config := {ground := true}) only
-- [fst_AddWithCarry_eq_sub_neg,
-- ConditionHolds,
-- state_simp_rules,
-- bitvec_rules, minimal_theory]
-- sym_aggregate
-- assumption
-- done
sorry

end SHA512
