feat: add ci test coverage
Draply authored and zk-steve committed Aug 28, 2024
1 parent 97f4f09 commit 4564ca5
Showing 4 changed files with 6 additions and 65 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/aptos_test.yaml
@@ -19,4 +19,4 @@ jobs:
         with:
           version: 4.0.0
       - name: test_contract
-        run: (cd libs && aptos move test --skip-fetch-latest-git-deps --skip-attribute-checks) & (cd verifier && aptos move test --skip-fetch-latest-git-deps --skip-attribute-checks)
+        run: (cd libs && aptos move test --coverage --dev --skip-fetch-latest-git-deps --skip-attribute-checks) & (cd verifier && aptos move test --coverage --dev --skip-fetch-latest-git-deps --skip-attribute-checks)
58 changes: 1 addition & 57 deletions libs/sources/bytes.move
@@ -4,50 +4,6 @@ module lib_addr::bytes {
     use std::vector::{append, for_each_ref};
     use aptos_std::from_bcs::to_u256;

-    // Pads a vector<u8> with a specified byte value up to the desired length
-    public fun pad(v: vector<u8>, desired_length: u64, pad_byte: u8, pad_left: bool): vector<u8> {
-        let current_length = vector::length(&v);
-
-        if (current_length >= desired_length) {
-            return v
-        };
-
-        let pad = vector::empty<u8>();
-        let pad_length = desired_length - current_length;
-
-        let i = 0;
-        while (i < pad_length) {
-            vector::push_back(&mut pad, pad_byte);
-            i = i + 1;
-        };
-
-        let padded = vector[];
-
-        if (pad_left) {
-            vector::append(&mut padded, v);
-            vector::append(&mut padded, pad);
-        } else {
-            vector::append(&mut padded, pad);
-            vector::append(&mut padded, v);
-        };
-
-        return padded
-    }
-
-    public fun reverse(x: vector<u8>): vector<u8> {
-        let result = vector::empty<u8>();
-        let length = vector::length(&x);
-        let i = 0;
-
-        while (i < length) {
-            let byte = vector::borrow(&x, length - 1 - i);
-            vector::push_back(&mut result, *byte);
-            i = i + 1;
-        };
-
-        return result
-    }
-
     public fun vec_to_bytes_be<Element>(v: &vector<Element>): vector<u8> {
         let bytes: vector<u8> = vector[];
         for_each_ref(v, |e| {
@@ -73,19 +29,7 @@ module lib_addr::bytes {

 #[test_only]
 module lib_addr::bytes_test {
-    use std::bcs::to_bytes;
-    use std::vector;
-
-    use lib_addr::bytes::{pad, vec_to_bytes_be};
-
-    #[test]
-    fun test_padding() {
-        let value = 0x123456;
-        let v = to_bytes(&value);
-        let padded = pad(v, 32, 0x00, true);
-        assert!(vector::length(&padded) == 32, 1);
-        assert!(padded == to_bytes(&0x123456u256), 1);
-    }
+    use lib_addr::bytes::vec_to_bytes_be;

     #[test]
     fun test_vec_to_bytes_be() {
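A minimal usage sketch for the remaining vec_to_bytes_be helper (not part of this commit; the module name, and the expectation that each u256 element serializes to a 32-byte big-endian word, are assumptions rather than something this diff confirms):

#[test_only]
module lib_addr::bytes_be_usage_example {
    use std::vector;

    use lib_addr::bytes::vec_to_bytes_be;

    // Illustrative only: vec_to_bytes_be is generic over Element, so the exact
    // output width depends on how each element is serialized (for u256 inputs,
    // a 32-byte big-endian word per element would give 64 bytes here).
    #[test]
    fun vec_to_bytes_be_smoke() {
        let input = vector[1u256, 2u256];
        let bytes = vec_to_bytes_be(&input);
        assert!(!vector::is_empty(&bytes), 1);
    }
}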
4 changes: 2 additions & 2 deletions libs/sources/prime_field_element_0.move
@@ -62,8 +62,8 @@ module lib_addr::prime_field_element_0 {
         res
     }

-    public fun fadd(a: u256, b: u256): u256 {
-        (a % K_MODULUS + b % K_MODULUS) % K_MODULUS
+    public inline fun fadd(a: u256, b: u256): u256 {
+        (a + b) % K_MODULUS
     }

     public inline fun fpow(val: u256, exp: u256): u256 {
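A note on the fadd change: the new body applies a single reduction, so it relies on a + b not overflowing u256, which is guaranteed when both operands are already reduced modulo the prime. A hedged sketch of that expectation (the module and test names are hypothetical; the prime value matches the K_MODULUS constant quoted elsewhere in this commit):

#[test_only]
module lib_addr::fadd_usage_example {
    use lib_addr::prime_field_element_0::fadd;

    // Same Stark prime as the K_MODULUS constant used by prime_field_element_0.
    const K_MODULUS: u256 = 0x800000000000011000000000000000000000000000000000000000000000001;

    #[test]
    fun fadd_wraps_reduced_operands() {
        // Operands below K_MODULUS cannot overflow u256 when added, so a single
        // `% K_MODULUS` reduction is enough.
        assert!(fadd(K_MODULUS - 1, 1) == 0, 1);
        assert!(fadd(2, 3) == 5, 2);
    }
}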
7 changes: 2 additions & 5 deletions verifier/sources/cpu/memory_page_fact_registry.move
@@ -24,6 +24,8 @@ module verifier_addr::memory_page_fact_registry {
     const ETOO_MANY_MEMORY_VALUES: u64 = 0x1;
     // 0
     const REGULAR_PAGE: u256 = 0x0;
+    // 3618502788666131213697322783095070105623107215331596699973092056135872020481
+    const K_MODULUS: u256 = 0x800000000000011000000000000000000000000000000000000000000000001;
     // End of generating constants!

     #[event]
@@ -185,9 +187,4 @@ module verifier_addr::memory_page_fact_registry {
         });
         register_fact(s, bytes32_to_u256(fact_hash));
     }
-
-    // A page based on a list of pairs (address, value).
-    // In this case, memoryHash = hash(address, value, address, value, address, value, ...).
-    // A page based on adjacent memory cells, starting from a given address.
-    // In this case, memoryHash = hash(value, value, value, ...).
 }
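The K_MODULUS constant added above is the Stark prime. A hypothetical sketch (not from this commit) of the kind of field-bound check such a registry could apply to submitted memory values:

#[test_only]
module verifier_addr::k_modulus_bound_example {
    // Value copied from the constant added in this commit.
    const K_MODULUS: u256 = 0x800000000000011000000000000000000000000000000000000000000000001;

    // Hypothetical helper: a range check of the kind a memory-page registry
    // would apply to each submitted (address, value) pair.
    fun is_field_element(x: u256): bool {
        x < K_MODULUS
    }

    #[test]
    fun field_bound() {
        assert!(is_field_element(K_MODULUS - 1), 1);
        assert!(!is_field_element(K_MODULUS), 2);
    }
}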
