From c8adb2f344fd954c44fd3124069deb43e43d85a0 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Tue, 10 Oct 2023 20:59:47 -0500 Subject: [PATCH 01/27] Parallelization infrastructure, GlobalOptions class --- src/kontrol/prove.py | 178 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 162 insertions(+), 16 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 4804d063a..471de2fbe 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -1,6 +1,10 @@ from __future__ import annotations import logging +from abc import ABC, abstractmethod +from dataclasses import dataclass +from threading import Thread +from queue import Queue from subprocess import CalledProcessError from typing import TYPE_CHECKING, NamedTuple @@ -18,6 +22,7 @@ from pyk.kast.inner import KApply, KSequence, KVariable, Subst from pyk.kast.manip import flatten_label, free_vars, set_cell from pyk.kcfg import KCFG +from pyk.kore.rpc import kore_server from pyk.prelude.k import GENERATED_TOP_CELL from pyk.prelude.kbool import FALSE, notBool from pyk.prelude.kint import intToken @@ -35,6 +40,7 @@ from pyk.kast.inner import KInner from pyk.kcfg import KCFGExplore + from pyk.kore.rpc import KoreServer from pyk.utils import BugReport from .solc_to_k import Contract @@ -103,28 +109,48 @@ def foundry_prove( test.method.update_digest(foundry.digest_file) def run_prover(test_suite: list[FoundryTest]) -> dict[tuple[str, int], tuple[bool, list[str] | None]]: - return _run_cfg_group( - test_suite, - foundry, - max_depth=max_depth, - max_iterations=max_iterations, - workers=workers, - simplify_init=simplify_init, - break_every_step=break_every_step, - break_on_jumpi=break_on_jumpi, - break_on_calls=break_on_calls, - bmc_depth=bmc_depth, + llvm_definition_dir = foundry.llvm_library if use_booster else None + + options = GlobalOptions( + foundry=foundry, + auto_abstract_gas=auto_abstract_gas, bug_report=bug_report, kore_rpc_command=kore_rpc_command, - use_booster=use_booster, + llvm_definition_dir=llvm_definition_dir, smt_timeout=smt_timeout, smt_retry_limit=smt_retry_limit, - counterexample_info=counterexample_info, trace_rewrites=trace_rewrites, - auto_abstract_gas=auto_abstract_gas, - port=port, + simplify_init=simplify_init, + bmc_depth=bmc_depth, ) + scheduler = Scheduler(workers=workers, initial_tests=test_suite, options=options) + scheduler.run() + + return {} + + # return _run_cfg_group( + # test_suite, + # foundry, + # max_depth=max_depth, + # max_iterations=max_iterations, + # workers=workers, + # simplify_init=simplify_init, + # break_every_step=break_every_step, + # break_on_jumpi=break_on_jumpi, + # break_on_calls=break_on_calls, + # bmc_depth=bmc_depth, + # bug_report=bug_report, + # kore_rpc_command=kore_rpc_command, + # use_booster=use_booster, + # smt_timeout=smt_timeout, + # smt_retry_limit=smt_retry_limit, + # counterexample_info=counterexample_info, + # trace_rewrites=trace_rewrites, + # auto_abstract_gas=auto_abstract_gas, + # port=port, + # ) + _LOGGER.info(f'Running setup functions in parallel: {setup_method_names}') results = run_prover(setup_methods) failed = [setup_cfg for setup_cfg, passed in results.items() if not passed] @@ -183,6 +209,114 @@ def collect_setup_methods(foundry: Foundry, contracts: Iterable[Contract] = (), return res +class Job(ABC): + @abstractmethod + def execute(self, queue: Queue) -> None: + ... 
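[editor's note] The Job abstraction just added is the unit of work for a scheduler built from the standard library's queue.Queue and threading.Thread (see the Scheduler class later in this patch). As a minimal standalone sketch of that worker/task-queue pattern — EchoJob and run_worker are illustrative names, not part of the patch:

from __future__ import annotations

from abc import ABC, abstractmethod
from queue import Queue
from threading import Thread


class Job(ABC):
    @abstractmethod
    def execute(self, queue: Queue) -> None:
        ...


class EchoJob(Job):
    def __init__(self, message: str) -> None:
        self.message = message

    def execute(self, queue: Queue) -> None:
        # A real job could enqueue follow-up work on `queue` here.
        print(self.message)


def run_worker(task_queue: Queue) -> None:
    # Mirrors Scheduler.exec_process: drain the queue until the process exits.
    while True:
        job = task_queue.get()
        job.execute(task_queue)
        task_queue.task_done()


if __name__ == '__main__':
    tasks: Queue = Queue()
    for i in range(3):
        tasks.put(EchoJob(f'job {i}'))
    Thread(target=run_worker, args=(tasks,), daemon=True).start()
    tasks.join()  # returns once every enqueued job has called task_done()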
+ + +class InitProofJob(Job): + test: FoundryTest + proof: APRProof + options: GlobalOptions + port: int + + def __init__( + self, + test: FoundryTest, + options: GlobalOptions, + port: int, + ) -> None: + self.test = test + self.options = options + self.port = port + + def execute(self, queue: Queue) -> None: + with legacy_explore( + self.options.foundry.kevm, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=self.test.id, + bug_report=self.options.bug_report, + kore_rpc_command=self.options.kore_rpc_command, + llvm_definition_dir=self.options.llvm_definition_dir, + smt_timeout=self.options.smt_timeout, + smt_retry_limit=self.options.smt_retry_limit, + trace_rewrites=self.options.trace_rewrites, + start_server=False, + port=self.port, + ) as kcfg_explore: + self.proof = method_to_apr_proof( + self.options.foundry, + self.test, + kcfg_explore, + simplify_init=self.options.simplify_init, + bmc_depth=self.options.bmc_depth, + ) + queue.put(AdvanceProofJob()) + + +@dataclass +class GlobalOptions: + foundry: Foundry + auto_abstract_gas: bool + bug_report: BugReport | None + kore_rpc_command: str | Iterable[str] | None + llvm_definition_dir: Path | None + smt_timeout: int | None + smt_retry_limit: int | None + trace_rewrites: bool + simplify_init: bool + bmc_depth: int | None + + +class AdvanceProofJob(Job): + def execute(self, queue: Queue) -> None: + print('test abc') + ... + + +def create_server(options: GlobalOptions) -> KoreServer: + return kore_server( + definition_dir=options.foundry.kevm.definition_dir, + llvm_definition_dir=options.llvm_definition_dir, + module_name=options.foundry.kevm.main_module, + command=options.kore_rpc_command, + bug_report=options.bug_report, + smt_timeout=options.smt_timeout, + smt_retry_limit=options.smt_retry_limit, + ) + + +class Scheduler: + servers: dict[str, KoreServer] + threads: list[Thread] + task_queue: Queue + options: GlobalOptions + + @staticmethod + def exec_process(task_queue: Queue) -> None: + while True: + job = task_queue.get() + job.execute(task_queue) + task_queue.task_done() + + def __init__(self, workers: int, initial_tests: list[FoundryTest], options: GlobalOptions) -> None: + self.options = options + self.task_queue = Queue() + self.servers = {} + for test in initial_tests: + self.servers[test.id] = create_server(self.options) + self.task_queue.put(InitProofJob(test=test, port=self.servers[test.id].port, options=self.options)) + self.threads = [Thread(target=Scheduler.exec_process, args=(self.task_queue,), daemon=True)] + + def run(self) -> None: + for thread in self.threads: + thread.start() + self.task_queue.join() +# for thread in self.threads: +# thread.join() + + def _run_cfg_group( tests: list[FoundryTest], foundry: Foundry, @@ -205,8 +339,20 @@ def _run_cfg_group( auto_abstract_gas: bool, port: int | None, ) -> dict[tuple[str, int], tuple[bool, list[str] | None]]: + llvm_definition_dir = foundry.llvm_library if use_booster else None + + def create_server() -> KoreServer: + return kore_server( + definition_dir=foundry.kevm.definition_dir, + llvm_definition_dir=llvm_definition_dir, + module_name=foundry.kevm.main_module, + command=kore_rpc_command, + bug_report=bug_report, + smt_timeout=smt_timeout, + smt_retry_limit=smt_retry_limit, + ) + def init_and_run_proof(test: FoundryTest) -> tuple[bool, list[str] | None]: - llvm_definition_dir = foundry.llvm_library if use_booster else None start_server = port is None with legacy_explore( From 6114efede82fb9a5a9da0f82460ff9f7f4858236 Mon Sep 17 00:00:00 
2001 From: Noah Watson Date: Wed, 11 Oct 2023 21:28:05 -0500 Subject: [PATCH 02/27] Add parallel execution using separation of cterm extension and kcfg extension --- src/kontrol/prove.py | 296 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 281 insertions(+), 15 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 471de2fbe..418ba981c 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -3,9 +3,9 @@ import logging from abc import ABC, abstractmethod from dataclasses import dataclass -from threading import Thread from queue import Queue from subprocess import CalledProcessError +from threading import Thread from typing import TYPE_CHECKING, NamedTuple from kevm_pyk.kevm import KEVM, KEVMSemantics @@ -28,7 +28,8 @@ from pyk.prelude.kint import intToken from pyk.prelude.ml import mlEqualsTrue from pyk.proof.proof import Proof -from pyk.proof.reachability import APRBMCProof, APRProof +from pyk.proof.reachability import APRBMCProof, APRBMCProver, APRProof, APRProver +from pyk.proof.show import APRProofShow from pyk.utils import run_process, single, unique from .foundry import Foundry @@ -40,6 +41,7 @@ from pyk.kast.inner import KInner from pyk.kcfg import KCFGExplore + from pyk.kcfg.explore import ExtendResult from pyk.kore.rpc import KoreServer from pyk.utils import BugReport @@ -122,12 +124,18 @@ def run_prover(test_suite: list[FoundryTest]) -> dict[tuple[str, int], tuple[boo trace_rewrites=trace_rewrites, simplify_init=simplify_init, bmc_depth=bmc_depth, + max_depth=max_depth, + break_every_step=break_every_step, + break_on_jumpi=break_on_jumpi, + break_on_calls=break_on_calls, + workers=workers, + counterexample_info=counterexample_info, ) scheduler = Scheduler(workers=workers, initial_tests=test_suite, options=options) scheduler.run() - return {} + return scheduler.results # return _run_cfg_group( # test_suite, @@ -211,7 +219,7 @@ def collect_setup_methods(foundry: Foundry, contracts: Iterable[Contract] = (), class Job(ABC): @abstractmethod - def execute(self, queue: Queue) -> None: + def execute(self, queue: Queue, done_queue: Queue) -> None: ... 
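[editor's note] With this patch, Job.execute takes both the shared task queue and a done queue: workers report results on done_queue, and the scheduler's main loop decides what to enqueue next while counting how many jobs are still outstanding. A hedged, self-contained sketch of that round trip, with a simple countdown standing in for real proof jobs:

from queue import Queue
from threading import Thread


def worker(task_queue: Queue, done_queue: Queue) -> None:
    while True:
        n = task_queue.get()
        done_queue.put(n - 1)  # stand-in for "execute the job, report the result"
        task_queue.task_done()


task_queue: Queue = Queue()
done_queue: Queue = Queue()
Thread(target=worker, args=(task_queue, done_queue), daemon=True).start()

job_counter = 0
task_queue.put(3)
job_counter += 1

while job_counter > 0:
    result = done_queue.get()
    job_counter -= 1
    if result > 0:            # analogous to "the proof still has pending nodes"
        task_queue.put(result)
        job_counter += 1

task_queue.join()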
@@ -231,7 +239,7 @@ def __init__( self.options = options self.port = port - def execute(self, queue: Queue) -> None: + def execute(self, queue: Queue, done_queue: Queue) -> None: with legacy_explore( self.options.foundry.kevm, kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), @@ -252,7 +260,12 @@ def execute(self, queue: Queue) -> None: simplify_init=self.options.simplify_init, bmc_depth=self.options.bmc_depth, ) - queue.put(AdvanceProofJob()) + self.proof.write_proof_data() + done_queue.put( + AdvanceProofJob( + test=self.test, node_id=self.proof.init, proof=self.proof, options=self.options, port=self.port + ) + ) @dataclass @@ -267,14 +280,132 @@ class GlobalOptions: trace_rewrites: bool simplify_init: bool bmc_depth: int | None + max_depth: int + break_every_step: bool + break_on_jumpi: bool + break_on_calls: bool + workers: int + counterexample_info: bool class AdvanceProofJob(Job): - def execute(self, queue: Queue) -> None: - print('test abc') + test: FoundryTest + proof: APRProof + options: GlobalOptions + node_id: int + port: int + + def __init__( + self, + test: FoundryTest, + node_id: int, + proof: APRProof, + options: GlobalOptions, + port: int, + ) -> None: + self.test = test + self.node_id = node_id + self.proof = proof + self.options = options + self.port = port + + def execute(self, queue: Queue, done_queue: Queue) -> None: + with legacy_explore( + self.options.foundry.kevm, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=self.test.id, + bug_report=self.options.bug_report, + kore_rpc_command=self.options.kore_rpc_command, + llvm_definition_dir=self.options.llvm_definition_dir, + smt_timeout=self.options.smt_timeout, + smt_retry_limit=self.options.smt_retry_limit, + trace_rewrites=self.options.trace_rewrites, + start_server=False, + port=self.port, + ) as kcfg_explore: + curr_node = self.proof.kcfg.node(self.node_id) + terminal_rules = build_terminal_rules(options=self.options) + cut_point_rules = build_cut_point_rules(options=self.options) + # prover = build_prover(options=self.options, proof=self.proof, kcfg_explore=kcfg_explore) + # assert type(prover) is APRProver + + cterm = curr_node.cterm + + extend_result = kcfg_explore.extend_cterm( + cterm, + execute_depth=self.options.max_depth, + cut_point_rules=cut_point_rules, + terminal_rules=terminal_rules, + ) + print(f'pushing ExtendKCFGJob to done_queue {self.test.id} {curr_node.id}') + done_queue.put( + ExtendKCFGJob( + test=self.test, + proof=self.proof, + node_id=curr_node.id, + port=self.port, + extend_result=extend_result, + options=self.options, + ) + ) + + +class CloseThreadJob(Job): + def execute(self, queue: Queue, done_queue: Queue) -> None: ... 
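[editor's note] CloseThreadJob acts as a poison pill: the scheduler enqueues one per worker thread so that each worker falls out of its get() loop and can be joined cleanly (a later patch in this series replaces the sentinel job with a bare 0). A minimal sketch of the same shutdown protocol, with illustrative names:

from queue import Queue
from threading import Thread

SENTINEL = object()


def worker(tasks: Queue) -> None:
    while True:
        item = tasks.get()
        if item is SENTINEL:
            tasks.task_done()
            break
        # ... execute the job ...
        tasks.task_done()


tasks: Queue = Queue()
threads = [Thread(target=worker, args=(tasks,)) for _ in range(4)]
for t in threads:
    t.start()
# ... enqueue real jobs here ...
for _ in threads:
    tasks.put(SENTINEL)  # one sentinel per worker
tasks.join()
for t in threads:
    t.join()             # safe now: every worker has consumed its sentinel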
+class ExtendKCFGJob(Job): + options: GlobalOptions + test: FoundryTest + node_id: int + proof: APRProof + port: int + extend_result: ExtendResult + + def __init__( + self, + test: FoundryTest, + proof: APRProof, + node_id: int, + port: int, + extend_result: ExtendResult, + options: GlobalOptions, + ) -> None: + self.test = test + self.proof = proof + self.node_id = node_id + self.port = port + self.options = options + self.extend_result = extend_result + + def execute(self, queue: Queue, done_queue: Queue) -> None: + with legacy_explore( + self.options.foundry.kevm, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=self.test.id, + bug_report=self.options.bug_report, + kore_rpc_command=self.options.kore_rpc_command, + llvm_definition_dir=self.options.llvm_definition_dir, + smt_timeout=self.options.smt_timeout, + smt_retry_limit=self.options.smt_retry_limit, + trace_rewrites=self.options.trace_rewrites, + start_server=False, + port=self.port, + ) as kcfg_explore: + proof_show = APRProofShow(kprint=self.options.foundry.kevm) + print('\n'.join(proof_show.show(self.proof))) + + kcfg_explore.extend_kcfg( + self.extend_result, + kcfg=self.proof.kcfg, + node=self.proof.kcfg.node(self.node_id), + logs=self.proof.logs, + ) + + print('\n'.join(proof_show.show(self.proof))) + + def create_server(options: GlobalOptions) -> KoreServer: return kore_server( definition_dir=options.foundry.kevm.definition_dir, @@ -289,32 +420,169 @@ def create_server(options: GlobalOptions) -> KoreServer: class Scheduler: servers: dict[str, KoreServer] + proofs: dict[FoundryTest, APRProof] threads: list[Thread] task_queue: Queue + done_queue: Queue options: GlobalOptions + results: dict[tuple[str, int], tuple[bool, list[str] | None]] + + job_counter: int + @staticmethod - def exec_process(task_queue: Queue) -> None: + def exec_process(task_queue: Queue, done_queue: Queue) -> None: while True: job = task_queue.get() - job.execute(task_queue) + if type(job) is CloseThreadJob: + print('shutting down thread') + task_queue.task_done() + break + job.execute(task_queue, done_queue) task_queue.task_done() def __init__(self, workers: int, initial_tests: list[FoundryTest], options: GlobalOptions) -> None: self.options = options self.task_queue = Queue() + self.done_queue = Queue() self.servers = {} + self.results = {} + # self.proofs = {} + self.job_counter = 0 + self.done_counter = 0 for test in initial_tests: self.servers[test.id] = create_server(self.options) self.task_queue.put(InitProofJob(test=test, port=self.servers[test.id].port, options=self.options)) - self.threads = [Thread(target=Scheduler.exec_process, args=(self.task_queue,), daemon=True)] + print(f'adding InitProofJob {test.id}') + self.job_counter += 1 + self.threads = [ + Thread(target=Scheduler.exec_process, args=(self.task_queue, self.done_queue), daemon=True) + for i in range(self.options.workers) + ] def run(self) -> None: for thread in self.threads: + print('starting thread') thread.start() + while self.job_counter > 0: + result = self.done_queue.get() + self.job_counter -= 1 + if type(result) is AdvanceProofJob: + print('result is AdvanceProofJob') + print(f'pulled AdvanceProofJob {result.test.id} {result.node_id}') + self.task_queue.put(result) + # self.proofs[result.test] = result.proof + print(f'pushing AdvanceProofJob {result.test.id} {result.node_id}') + self.job_counter += 1 + elif type(result) is ExtendKCFGJob: + print(f'pulled ExtendKCFGJob {result.test.id} {result.node_id}') + + result.execute(self.task_queue, 
self.done_queue) + + with legacy_explore( + self.options.foundry.kevm, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=result.test.id, + bug_report=self.options.bug_report, + kore_rpc_command=self.options.kore_rpc_command, + llvm_definition_dir=self.options.llvm_definition_dir, + smt_timeout=self.options.smt_timeout, + smt_retry_limit=self.options.smt_retry_limit, + trace_rewrites=self.options.trace_rewrites, + start_server=False, + port=result.port, + ) as kcfg_explore: + prover = build_prover(options=self.options, proof=result.proof, kcfg_explore=kcfg_explore) + prover._check_all_terminals() + + for node in result.proof.terminal: + if result.proof.kcfg.is_leaf(node.id) and not result.proof.is_target(node.id): + # TODO can we have a worker thread check subsumtion? + prover._check_subsume(node) + + result.proof.write_proof_data() + + if not result.proof.pending: + failure_log = None + if not result.proof.passed: + failure_log = print_failure_info( + result.proof, kcfg_explore, self.options.counterexample_info + ) + self.results[(result.test.id, result.test.version)] = (result.proof.passed, failure_log) + + for pending_node in result.proof.pending: + if pending_node not in result.proof.kcfg.reachable_nodes(source_id=result.node_id): + continue + print(f'pushing AdvanceProofJob {result.test.id} {pending_node.id}') + self.task_queue.put( + AdvanceProofJob( + test=result.test, + node_id=pending_node.id, + proof=result.proof, + options=self.options, + port=result.port, + ) + ) + self.job_counter += 1 + + for _thread in self.threads: + print('sending cleanup message') + self.task_queue.put(CloseThreadJob()) + + print('a') self.task_queue.join() -# for thread in self.threads: -# thread.join() + print('b') + + +def build_terminal_rules( + options: GlobalOptions, +) -> list[str]: + terminal_rules = ['EVM.halt'] + if options.break_every_step: + terminal_rules.append('EVM.step') + return terminal_rules + + +def build_cut_point_rules( + options: GlobalOptions, +) -> list[str]: + cut_point_rules = [] + if options.break_on_jumpi: + cut_point_rules.extend(['EVM.jumpi.true', 'EVM.jumpi.false']) + if options.break_on_calls: + cut_point_rules.extend( + [ + 'EVM.call', + 'EVM.callcode', + 'EVM.delegatecall', + 'EVM.staticcall', + 'EVM.create', + 'EVM.create2', + 'FOUNDRY.foundry.call', + 'EVM.end', + 'EVM.return.exception', + 'EVM.return.revert', + 'EVM.return.success', + ] + ) + return cut_point_rules + + +def build_prover( + options: GlobalOptions, + proof: Proof, + kcfg_explore: KCFGExplore, + # ) -> Prover +) -> APRProver: + proof = proof + if type(proof) is APRBMCProof: + return APRBMCProver(proof, kcfg_explore) + elif type(proof) is APRProof: + return APRProver(proof, kcfg_explore) + # elif type(proof) is EqualityProof: + # return EqualityProver(kcfg_explore=kcfg_explore, proof=proof) + else: + raise ValueError(f'Do not know how to build prover for proof: {proof}') def _run_cfg_group( @@ -414,7 +682,6 @@ def method_to_apr_proof( ) -> APRProof | APRBMCProof: if Proof.proof_data_exists(test.id, foundry.proofs_dir): apr_proof = foundry.get_apr_proof(test.id) - apr_proof.write_proof_data() return apr_proof setup_cterm = None @@ -444,7 +711,6 @@ def method_to_apr_proof( else: apr_proof = APRProof(test.id, kcfg, [], init_node_id, target_node_id, {}, proof_dir=foundry.proofs_dir) - apr_proof.write_proof_data() return apr_proof From 5ee28ccac0be8954d4d7f9ff06d6be98abbf1634 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Thu, 12 Oct 2023 18:12:11 -0500 Subject: [PATCH 
03/27] Stop when max_iterations is reached --- src/kontrol/__main__.py | 1 + src/kontrol/prove.py | 28 +++++++++++++--------------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/src/kontrol/__main__.py b/src/kontrol/__main__.py index 56d3caef8..ab614afde 100644 --- a/src/kontrol/__main__.py +++ b/src/kontrol/__main__.py @@ -209,6 +209,7 @@ def exec_prove( for line in failure_log: print(line) + print('c') sys.exit(failed) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 418ba981c..2fef0e759 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -130,6 +130,7 @@ def run_prover(test_suite: list[FoundryTest]) -> dict[tuple[str, int], tuple[boo break_on_calls=break_on_calls, workers=workers, counterexample_info=counterexample_info, + max_iterations=max_iterations, ) scheduler = Scheduler(workers=workers, initial_tests=test_suite, options=options) @@ -286,6 +287,7 @@ class GlobalOptions: break_on_calls: bool workers: int counterexample_info: bool + max_iterations: int | None class AdvanceProofJob(Job): @@ -350,11 +352,6 @@ def execute(self, queue: Queue, done_queue: Queue) -> None: ) -class CloseThreadJob(Job): - def execute(self, queue: Queue, done_queue: Queue) -> None: - ... - - class ExtendKCFGJob(Job): options: GlobalOptions test: FoundryTest @@ -425,6 +422,7 @@ class Scheduler: task_queue: Queue done_queue: Queue options: GlobalOptions + iterations: dict[str, int] results: dict[tuple[str, int], tuple[bool, list[str] | None]] @@ -434,10 +432,6 @@ class Scheduler: def exec_process(task_queue: Queue, done_queue: Queue) -> None: while True: job = task_queue.get() - if type(job) is CloseThreadJob: - print('shutting down thread') - task_queue.task_done() - break job.execute(task_queue, done_queue) task_queue.task_done() @@ -447,12 +441,13 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Glob self.done_queue = Queue() self.servers = {} self.results = {} - # self.proofs = {} + self.iterations = {} self.job_counter = 0 self.done_counter = 0 for test in initial_tests: self.servers[test.id] = create_server(self.options) self.task_queue.put(InitProofJob(test=test, port=self.servers[test.id].port, options=self.options)) + self.iterations[test.id] = 0 print(f'adding InitProofJob {test.id}') self.job_counter += 1 self.threads = [ @@ -471,10 +466,17 @@ def run(self) -> None: print('result is AdvanceProofJob') print(f'pulled AdvanceProofJob {result.test.id} {result.node_id}') self.task_queue.put(result) - # self.proofs[result.test] = result.proof print(f'pushing AdvanceProofJob {result.test.id} {result.node_id}') self.job_counter += 1 elif type(result) is ExtendKCFGJob: + if ( + self.options.max_iterations is not None + and self.options.max_iterations <= self.iterations[result.test.id] + ): + _LOGGER.warning(f'Reached iteration bound {result.proof.id}: {self.options.max_iterations}') + break + self.iterations[result.test.id] += 1 + print(f'pulled ExtendKCFGJob {result.test.id} {result.node_id}') result.execute(self.task_queue, self.done_queue) @@ -525,10 +527,6 @@ def run(self) -> None: ) self.job_counter += 1 - for _thread in self.threads: - print('sending cleanup message') - self.task_queue.put(CloseThreadJob()) - print('a') self.task_queue.join() print('b') From 56c5ae0983f08da0a0cc4ab80d5da2bed43b46bc Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Thu, 12 Oct 2023 19:00:18 -0500 Subject: [PATCH 04/27] Fix to work with constructor and setup kcfg inclusion --- src/kontrol/prove.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 
deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index f48bc7061..ee0eeedd7 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -278,13 +278,15 @@ def execute(self, queue: Queue, done_queue: Queue) -> None: kcfg_explore, simplify_init=self.options.simplify_init, bmc_depth=self.options.bmc_depth, + run_constructor=self.options.run_constructor, ) self.proof.write_proof_data() - done_queue.put( - AdvanceProofJob( - test=self.test, node_id=self.proof.init, proof=self.proof, options=self.options, port=self.port + for pending_node in self.proof.pending: + done_queue.put( + AdvanceProofJob( + test=self.test, node_id=pending_node.id, proof=self.proof, options=self.options, port=self.port + ) ) - ) @dataclass @@ -306,6 +308,7 @@ class GlobalOptions: workers: int counterexample_info: bool max_iterations: int | None + run_constructor: bool class AdvanceProofJob(Job): From ef3826645e523494b2a764543ed9644ad513d5c3 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Thu, 12 Oct 2023 22:35:21 -0500 Subject: [PATCH 05/27] Work with bounded model checking --- src/kontrol/__main__.py | 25 ++++++--- src/kontrol/prove.py | 57 ++++++++++++++++----- src/tests/integration/test_foundry_prove.py | 28 +++++----- 3 files changed, 74 insertions(+), 36 deletions(-) diff --git a/src/kontrol/__main__.py b/src/kontrol/__main__.py index d3ed12991..b96921923 100644 --- a/src/kontrol/__main__.py +++ b/src/kontrol/__main__.py @@ -10,6 +10,8 @@ from kevm_pyk.dist import DistTarget from kevm_pyk.utils import arg_pair_of from pyk.cli.utils import file_path +from pyk.proof.equality import EqualityProof +from pyk.proof.reachability import APRProof from pyk.proof.tui import APRProofViewer from . import VERSION @@ -161,6 +163,7 @@ def exec_prove( trace_rewrites: bool = False, auto_abstract_gas: bool = False, run_constructor: bool = False, + fail_fast: bool = False, **kwargs: Any, ) -> None: _ignore_arg(kwargs, 'main_module', f'--main-module: {kwargs["main_module"]}') @@ -197,19 +200,22 @@ def exec_prove( trace_rewrites=trace_rewrites, auto_abstract_gas=auto_abstract_gas, run_constructor=run_constructor, + fail_fast=fail_fast, ) failed = 0 - for pid, r in results.items(): - passed, failure_log = r - if passed: - print(f'PROOF PASSED: {pid}') + for proof in results: + if proof.passed: + print(f'PROOF PASSED: {proof.id}') else: failed += 1 - print(f'PROOF FAILED: {pid}') - if failure_info and failure_log is not None: + print(f'PROOF FAILED: {proof.id}') + if isinstance(proof, APRProof) and failure_info and proof.failure_info is not None: + failure_log = proof.failure_info.print() failure_log += Foundry.help_info() for line in failure_log: print(line) + elif isinstance(proof, EqualityProof): + print('EqualityProof failed.') print('c') sys.exit(failed) @@ -529,6 +535,13 @@ def _parse_test_version_tuple(value: str) -> tuple[str, int | None]: action='store_true', help='Include the contract constructor in the test execution.', ) + prove_args.add_argument( + '--fail-fast', + dest='fail_fast', + default=False, + action='store_true', + help='Stop execution on other branches if failing node is detected.', + ) show_args = command_parser.add_parser( 'show', diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index ee0eeedd7..27b41b469 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -30,8 +30,6 @@ from pyk.proof.proof import Proof from pyk.proof.reachability import APRBMCProof, APRBMCProver, APRProof, APRProver from pyk.proof.show import APRProofShow -from pyk.utils import run_process, single, 
unique -from pyk.proof.reachability import APRBMCProof, APRProof from pyk.utils import run_process, unique from .foundry import Foundry @@ -75,7 +73,8 @@ def foundry_prove( auto_abstract_gas: bool = False, port: int | None = None, run_constructor: bool = False, -) -> dict[tuple[str, int], tuple[bool, list[str] | None]]: + fail_fast: bool = False, +) -> list[APRProof]: if workers <= 0: raise ValueError(f'Must have at least one worker, found: --workers {workers}') if max_iterations is not None and max_iterations < 0: @@ -119,7 +118,7 @@ def foundry_prove( for test in constructor_tests: test.method.update_digest(foundry.digest_file) - def run_prover(test_suite: list[FoundryTest]) -> dict[tuple[str, int], tuple[bool, list[str] | None]]: + def run_prover(test_suite: list[FoundryTest]) -> list[APRProof]: llvm_definition_dir = foundry.llvm_library if use_booster else None options = GlobalOptions( @@ -141,6 +140,7 @@ def run_prover(test_suite: list[FoundryTest]) -> dict[tuple[str, int], tuple[boo counterexample_info=counterexample_info, max_iterations=max_iterations, run_constructor=run_constructor, + fail_fast=fail_fast, ) scheduler = Scheduler(workers=workers, initial_tests=test_suite, options=options) @@ -173,14 +173,14 @@ def run_prover(test_suite: list[FoundryTest]) -> dict[tuple[str, int], tuple[boo if run_constructor: _LOGGER.info(f'Running initialization code for contracts in parallel: {constructor_names}') results = run_prover(constructor_tests) - failed = [init_cfg for init_cfg, passed in results.items() if not passed] + failed = [init_proof for init_proof in results if not init_proof.passed] if failed: raise ValueError(f'Running initialization code failed for {len(failed)} contracts: {failed}') _LOGGER.info(f'Running setup functions in parallel: {setup_method_names}') results = run_prover(setup_method_tests) - failed = [setup_cfg for setup_cfg, passed in results.items() if not passed] + failed = [setup_proof for setup_proof in results if not setup_proof.passed] if failed: raise ValueError(f'Running setUp method failed for {len(failed)} contracts: {failed}') @@ -309,6 +309,7 @@ class GlobalOptions: counterexample_info: bool max_iterations: int | None run_constructor: bool + fail_fast: bool class AdvanceProofJob(Job): @@ -445,7 +446,7 @@ class Scheduler: options: GlobalOptions iterations: dict[str, int] - results: dict[tuple[str, int], tuple[bool, list[str] | None]] + results: list[APRProof] job_counter: int @@ -461,7 +462,7 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Glob self.task_queue = Queue() self.done_queue = Queue() self.servers = {} - self.results = {} + self.results = [] self.iterations = {} self.job_counter = 0 self.done_counter = 0 @@ -523,20 +524,48 @@ def run(self) -> None: # TODO can we have a worker thread check subsumtion? 
prover._check_subsume(node) + if result.proof.failed: + prover.save_failure_info() + + if self.options.fail_fast and result.proof.failed: + _LOGGER.warning( + f'Terminating proof early because fail_fast is set {result.proof.id}, failing nodes: {[nd.id for nd in result.proof.failing]}' + ) + self.results.append(result.proof) + continue + result.proof.write_proof_data() if not result.proof.pending: - failure_log = None - if not result.proof.passed: - failure_log = print_failure_info( - result.proof, kcfg_explore, self.options.counterexample_info - ) - self.results[(result.test.id, result.test.version)] = (result.proof.passed, failure_log) + self.results.append(result.proof) for pending_node in result.proof.pending: if pending_node not in result.proof.kcfg.reachable_nodes(source_id=result.node_id): continue print(f'pushing AdvanceProofJob {result.test.id} {pending_node.id}') + + if type(result.proof) is APRBMCProof: + node = result.proof.kcfg.node(pending_node.id) + + _LOGGER.info(f'Checking bmc depth for node {result.proof.id}: {pending_node.id}') + _prior_loops = [ + succ.source.id + for succ in result.proof.shortest_path_to(pending_node.id) + if kcfg_explore.kcfg_semantics.same_loop(succ.source.cterm, node.cterm) + ] + prior_loops: list[int] = [] + for _pl in _prior_loops: + if not ( + result.proof.kcfg.zero_depth_between(_pl, node.id) + or any(result.proof.kcfg.zero_depth_between(_pl, pl) for pl in prior_loops) + ): + prior_loops.append(_pl) + _LOGGER.info(f'Prior loop heads for node {result.proof.id}: {(node.id, prior_loops)}') + if len(prior_loops) > result.proof.bmc_depth: + result.proof.add_bounded(node.id) + self.results.append(result.proof) + continue + self.task_queue.put( AdvanceProofJob( test=result.test, diff --git a/src/tests/integration/test_foundry_prove.py b/src/tests/integration/test_foundry_prove.py index 6ef969035..9b42f383c 100644 --- a/src/tests/integration/test_foundry_prove.py +++ b/src/tests/integration/test_foundry_prove.py @@ -345,23 +345,19 @@ def test_foundry_remove_node( assert_pass(test, prove_res) -def assert_pass(test: str, prove_res: dict[tuple[str, int], tuple[bool, list[str] | None]]) -> None: - id = id_for_test(test, prove_res) - passed, log = prove_res[(test, id)] - if not passed: - assert log - pytest.fail('\n'.join(log)) +def assert_pass(test: str, prove_res: list[APRProof]) -> None: + proof_dict = {proof.id: proof for proof in prove_res} + proof = proof_dict[test] + if not proof.passed: + assert proof.failure_info + pytest.fail('\n'.join(proof.failure_info.print())) -def assert_fail(test: str, prove_res: dict[tuple[str, int], tuple[bool, list[str] | None]]) -> None: - id = id_for_test(test, prove_res) - passed, log = prove_res[test, id] - assert not passed - assert log - - -def id_for_test(test: str, prove_res: dict[tuple[str, int], tuple[bool, list[str] | None]]) -> int: - return single(_id for _test, _id in prove_res.keys() if _test == test and _id is not None) +def assert_fail(test: str, prove_res: list[APRProof]) -> None: + proof_dict = {proof.id: proof for proof in prove_res} + proof = proof_dict[test] + assert not proof.passed + assert proof.failure_info def assert_or_update_show_output(show_res: str, expected_file: Path, *, update: bool) -> None: @@ -404,7 +400,7 @@ def test_foundry_resume_proof( port=server.port, bug_report=bug_report, ) - id = id_for_test(test, prove_res) + id = 0 proof = foundry.get_apr_proof(f'{test}:{id}') assert proof.pending From 85c742cfef4fcfb71b763befdb4d5ce4a07f5706 Mon Sep 17 00:00:00 2001 From: devops Date: Fri, 13 
Oct 2023 03:36:41 +0000 Subject: [PATCH 06/27] Set Version: 0.1.27 --- package/version | 2 +- pyproject.toml | 2 +- src/kontrol/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package/version b/package/version index 7db267292..a2e1aa9d9 100644 --- a/package/version +++ b/package/version @@ -1 +1 @@ -0.1.26 +0.1.27 diff --git a/pyproject.toml b/pyproject.toml index eeac4af1d..e8ce4e1e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "kontrol" -version = "0.1.26" +version = "0.1.27" description = "Foundry integration for KEVM" authors = [ "Runtime Verification, Inc. ", diff --git a/src/kontrol/__init__.py b/src/kontrol/__init__.py index c9214f220..88965db67 100644 --- a/src/kontrol/__init__.py +++ b/src/kontrol/__init__.py @@ -5,4 +5,4 @@ if TYPE_CHECKING: from typing import Final -VERSION: Final = '0.1.26' +VERSION: Final = '0.1.27' From 420a91fb42995664fcd88698ed89c5bb219df342 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Fri, 13 Oct 2023 15:33:21 -0500 Subject: [PATCH 07/27] Fix test --- src/tests/integration/test_foundry_prove.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/tests/integration/test_foundry_prove.py b/src/tests/integration/test_foundry_prove.py index 9b42f383c..5bc28c37c 100644 --- a/src/tests/integration/test_foundry_prove.py +++ b/src/tests/integration/test_foundry_prove.py @@ -346,16 +346,14 @@ def test_foundry_remove_node( def assert_pass(test: str, prove_res: list[APRProof]) -> None: - proof_dict = {proof.id: proof for proof in prove_res} - proof = proof_dict[test] + proof = single([proof for proof in prove_res if proof.id.split(':')[0] == test]) if not proof.passed: assert proof.failure_info pytest.fail('\n'.join(proof.failure_info.print())) def assert_fail(test: str, prove_res: list[APRProof]) -> None: - proof_dict = {proof.id: proof for proof in prove_res} - proof = proof_dict[test] + proof = single([proof for proof in prove_res if proof.id.split(':')[0] == test]) assert not proof.passed assert proof.failure_info From e9e2d7fd08ad8d49342ffee91265aa4bc15588c3 Mon Sep 17 00:00:00 2001 From: devops Date: Fri, 13 Oct 2023 20:33:58 +0000 Subject: [PATCH 08/27] Set Version: 0.1.28 --- package/version | 2 +- pyproject.toml | 2 +- src/kontrol/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package/version b/package/version index a2e1aa9d9..baec65a93 100644 --- a/package/version +++ b/package/version @@ -1 +1 @@ -0.1.27 +0.1.28 diff --git a/pyproject.toml b/pyproject.toml index e8ce4e1e5..67cee4165 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "kontrol" -version = "0.1.27" +version = "0.1.28" description = "Foundry integration for KEVM" authors = [ "Runtime Verification, Inc. 
", diff --git a/src/kontrol/__init__.py b/src/kontrol/__init__.py index 88965db67..7640e8d16 100644 --- a/src/kontrol/__init__.py +++ b/src/kontrol/__init__.py @@ -5,4 +5,4 @@ if TYPE_CHECKING: from typing import Final -VERSION: Final = '0.1.27' +VERSION: Final = '0.1.28' From b1b7d225de450f2fc270d12c88eeb6b18a6eb35d Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Fri, 13 Oct 2023 20:12:31 -0500 Subject: [PATCH 09/27] Fix running out of memory/not closing servers --- src/kontrol/prove.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 27b41b469..5cc4f57ae 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -454,6 +454,10 @@ class Scheduler: def exec_process(task_queue: Queue, done_queue: Queue) -> None: while True: job = task_queue.get() + print(job) + if job == 0: + task_queue.task_done() + break job.execute(task_queue, done_queue) task_queue.task_done() @@ -577,8 +581,16 @@ def run(self) -> None: ) self.job_counter += 1 + for _thread in self.threads: + self.task_queue.put(0) + print('a') self.task_queue.join() + for thread in self.threads: + thread.join() + + for server in self.servers.values(): + server.close() print('b') From 97e2f8386c54406ab9e37764fe47635c5b81fbc9 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Mon, 16 Oct 2023 14:59:31 -0500 Subject: [PATCH 10/27] Try using processes instead of threads --- src/kontrol/prove.py | 96 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 78 insertions(+), 18 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 5cc4f57ae..5133ee82e 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -1,11 +1,12 @@ from __future__ import annotations import logging +import time from abc import ABC, abstractmethod from dataclasses import dataclass +from multiprocessing import Process from queue import Queue from subprocess import CalledProcessError -from threading import Thread from typing import TYPE_CHECKING, NamedTuple from kevm_pyk.kevm import KEVM, KEVMSemantics @@ -29,7 +30,6 @@ from pyk.prelude.ml import mlEqualsTrue from pyk.proof.proof import Proof from pyk.proof.reachability import APRBMCProof, APRBMCProver, APRProof, APRProver -from pyk.proof.show import APRProofShow from pyk.utils import run_process, unique from .foundry import Foundry @@ -361,7 +361,7 @@ def execute(self, queue: Queue, done_queue: Queue) -> None: cut_point_rules=cut_point_rules, terminal_rules=terminal_rules, ) - print(f'pushing ExtendKCFGJob to done_queue {self.test.id} {curr_node.id}') + # print(f'pushing ExtendKCFGJob to done_queue {self.test.id} {curr_node.id}') done_queue.put( ExtendKCFGJob( test=self.test, @@ -412,8 +412,8 @@ def execute(self, queue: Queue, done_queue: Queue) -> None: start_server=False, port=self.port, ) as kcfg_explore: - proof_show = APRProofShow(kprint=self.options.foundry.kevm) - print('\n'.join(proof_show.show(self.proof))) + # proof_show = APRProofShow(kprint=self.options.foundry.kevm) + # print('\n'.join(proof_show.show(self.proof))) kcfg_explore.extend_kcfg( self.extend_result, @@ -422,7 +422,8 @@ def execute(self, queue: Queue, done_queue: Queue) -> None: logs=self.proof.logs, ) - print('\n'.join(proof_show.show(self.proof))) + +# print('\n'.join(proof_show.show(self.proof))) def create_server(options: GlobalOptions) -> KoreServer: @@ -440,7 +441,7 @@ def create_server(options: GlobalOptions) -> KoreServer: class Scheduler: servers: dict[str, KoreServer] proofs: dict[FoundryTest, APRProof] - threads: list[Thread] + threads: 
list[Process] task_queue: Queue done_queue: Queue options: GlobalOptions @@ -454,7 +455,7 @@ class Scheduler: def exec_process(task_queue: Queue, done_queue: Queue) -> None: while True: job = task_queue.get() - print(job) + # print(job) if job == 0: task_queue.task_done() break @@ -474,27 +475,49 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Glob self.servers[test.id] = create_server(self.options) self.task_queue.put(InitProofJob(test=test, port=self.servers[test.id].port, options=self.options)) self.iterations[test.id] = 0 - print(f'adding InitProofJob {test.id}') + # print(f'adding InitProofJob {test.id}') self.job_counter += 1 self.threads = [ - Thread(target=Scheduler.exec_process, args=(self.task_queue, self.done_queue), daemon=True) + Process(target=Scheduler.exec_process, args=(self.task_queue, self.done_queue), daemon=True) for i in range(self.options.workers) ] def run(self) -> None: for thread in self.threads: - print('starting thread') + # print('starting thread') thread.start() while self.job_counter > 0: + print('waiting for new job') result = self.done_queue.get() + print('got job') + print('working') self.job_counter -= 1 if type(result) is AdvanceProofJob: - print('result is AdvanceProofJob') - print(f'pulled AdvanceProofJob {result.test.id} {result.node_id}') + # print('result is AdvanceProofJob') + # print(f'pulled AdvanceProofJob {result.test.id} {result.node_id}') self.task_queue.put(result) - print(f'pushing AdvanceProofJob {result.test.id} {result.node_id}') + # with legacy_explore( + # self.options.foundry.kevm, + # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + # id=result.test.id, + # bug_report=self.options.bug_report, + # kore_rpc_command=self.options.kore_rpc_command, + # llvm_definition_dir=self.options.llvm_definition_dir, + # smt_timeout=self.options.smt_timeout, + # smt_retry_limit=self.options.smt_retry_limit, + # trace_rewrites=self.options.trace_rewrites, + # start_server=False, + # port=result.port, + # ) as kcfg_explore: + # self.provers[result.proof.id] = build_prover( + # options=self.options, proof=result.proof, kcfg_explore=kcfg_explore + # ) + # print(f'pushing AdvanceProofJob {result.test.id} {result.node_id}') self.job_counter += 1 elif type(result) is ExtendKCFGJob: + print('extendkcfgjob') + + print('checking iteration bound') if ( self.options.max_iterations is not None and self.options.max_iterations <= self.iterations[result.test.id] @@ -503,8 +526,9 @@ def run(self) -> None: break self.iterations[result.test.id] += 1 - print(f'pulled ExtendKCFGJob {result.test.id} {result.node_id}') + # print(f'pulled ExtendKCFGJob {result.test.id} {result.node_id}') + print('extending kcfg') result.execute(self.task_queue, self.done_queue) with legacy_explore( @@ -520,33 +544,65 @@ def run(self) -> None: start_server=False, port=result.port, ) as kcfg_explore: + ns = time.time_ns() + print('building prover') + # prover = self.provers[result.proof.id] prover = build_prover(options=self.options, proof=result.proof, kcfg_explore=kcfg_explore) + ns = time.time_ns() - ns + print(ns / 1000000000) + + ns = time.time_ns() + print('checking terminals') prover._check_all_terminals() + ns = time.time_ns() - ns + print(ns / 1000000000) + ns = time.time_ns() + print('checking subsumtion') for node in result.proof.terminal: if result.proof.kcfg.is_leaf(node.id) and not result.proof.is_target(node.id): # TODO can we have a worker thread check subsumtion? 
prover._check_subsume(node) + ns = time.time_ns() - ns + print(ns / 1000000000) + ns = time.time_ns() + print('saving failure info if necessary') if result.proof.failed: prover.save_failure_info() + ns = time.time_ns() - ns + print(ns / 1000000000) + ns = time.time_ns() + print('checking fail fast') if self.options.fail_fast and result.proof.failed: _LOGGER.warning( f'Terminating proof early because fail_fast is set {result.proof.id}, failing nodes: {[nd.id for nd in result.proof.failing]}' ) self.results.append(result.proof) continue + ns = time.time_ns() - ns + print(ns / 1000000000) + ns = time.time_ns() + print('writing proof') result.proof.write_proof_data() + ns = time.time_ns() - ns + print(ns / 1000000000) + ns = time.time_ns() + print('saving results') if not result.proof.pending: self.results.append(result.proof) + ns = time.time_ns() - ns + print(ns / 1000000000) + ns = time.time_ns() + print('adding new nodes to queue') for pending_node in result.proof.pending: if pending_node not in result.proof.kcfg.reachable_nodes(source_id=result.node_id): continue - print(f'pushing AdvanceProofJob {result.test.id} {pending_node.id}') + # print(f'pushing AdvanceProofJob {result.test.id} {pending_node.id}') if type(result.proof) is APRBMCProof: node = result.proof.kcfg.node(pending_node.id) @@ -580,18 +636,22 @@ def run(self) -> None: ) ) self.job_counter += 1 + ns = time.time_ns() - ns + print(ns / 1000000000) for _thread in self.threads: self.task_queue.put(0) - print('a') + # print('a') self.task_queue.join() for thread in self.threads: thread.join() for server in self.servers.values(): server.close() - print('b') + + +# print('b') def build_terminal_rules( From c991e59dffc897ebab9af0221aaebd6d0e7e43b5 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Mon, 16 Oct 2023 15:41:38 -0500 Subject: [PATCH 11/27] Use correct queue class --- poetry.lock | 2 +- pyproject.toml | 1 + src/kontrol/prove.py | 41 ++++++++++++++++++++++++++++------------- 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/poetry.lock b/poetry.lock index c20073fec..5fc4c73f3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1083,4 +1083,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "1b5320401c843ce558b42d4d68d1337bb5e538b7b0558785f035e80f793a7de0" +content-hash = "93b8bb032c0844bda933e1b16b2ad8d6bd19f52faff380af6517c42c4d1d8b99" diff --git a/pyproject.toml b/pyproject.toml index 67cee4165..9d83572c8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ kontrol = "kontrol.__main__:main" [tool.poetry.dependencies] python = "^3.10" kevm-pyk = { git = "https://github.com/runtimeverification/evm-semantics.git", tag = "v1.0.315", subdirectory = "kevm-pyk" } +multiprocess = "^0.70.15" [tool.poetry.group.dev.dependencies] autoflake = "*" diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 5133ee82e..6bc21a2c3 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -4,8 +4,6 @@ import time from abc import ABC, abstractmethod from dataclasses import dataclass -from multiprocessing import Process -from queue import Queue from subprocess import CalledProcessError from typing import TYPE_CHECKING, NamedTuple @@ -18,6 +16,8 @@ legacy_explore, print_failure_info, ) +from multiprocess import JoinableQueue # type: ignore +from multiprocess import Process # type: ignore from pathos.pools import ProcessPool # type: ignore from pyk.cterm import CTerm from pyk.kast.inner import KApply, 
KSequence, KVariable, Subst @@ -238,7 +238,7 @@ def collect_setup_methods(foundry: Foundry, contracts: Iterable[Contract] = (), class Job(ABC): @abstractmethod - def execute(self, queue: Queue, done_queue: Queue) -> None: + def execute(self, queue: JoinableQueue, done_queue: JoinableQueue) -> None: ... @@ -258,7 +258,7 @@ def __init__( self.options = options self.port = port - def execute(self, queue: Queue, done_queue: Queue) -> None: + def execute(self, queue: JoinableQueue, done_queue: JoinableQueue) -> None: with legacy_explore( self.options.foundry.kevm, kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), @@ -272,6 +272,8 @@ def execute(self, queue: Queue, done_queue: Queue) -> None: start_server=False, port=self.port, ) as kcfg_explore: + print('abcd') + self.proof = method_to_apr_proof( self.options.foundry, self.test, @@ -282,6 +284,7 @@ def execute(self, queue: Queue, done_queue: Queue) -> None: ) self.proof.write_proof_data() for pending_node in self.proof.pending: + print('putting into done_queue 1') done_queue.put( AdvanceProofJob( test=self.test, node_id=pending_node.id, proof=self.proof, options=self.options, port=self.port @@ -333,7 +336,7 @@ def __init__( self.options = options self.port = port - def execute(self, queue: Queue, done_queue: Queue) -> None: + def execute(self, queue: JoinableQueue, done_queue: JoinableQueue) -> None: with legacy_explore( self.options.foundry.kevm, kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), @@ -362,6 +365,7 @@ def execute(self, queue: Queue, done_queue: Queue) -> None: terminal_rules=terminal_rules, ) # print(f'pushing ExtendKCFGJob to done_queue {self.test.id} {curr_node.id}') + print('putting into done_queue') done_queue.put( ExtendKCFGJob( test=self.test, @@ -398,7 +402,7 @@ def __init__( self.options = options self.extend_result = extend_result - def execute(self, queue: Queue, done_queue: Queue) -> None: + def execute(self, queue: JoinableQueue, done_queue: JoinableQueue) -> None: with legacy_explore( self.options.foundry.kevm, kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), @@ -442,8 +446,8 @@ class Scheduler: servers: dict[str, KoreServer] proofs: dict[FoundryTest, APRProof] threads: list[Process] - task_queue: Queue - done_queue: Queue + task_queue: JoinableQueue + done_queue: JoinableQueue options: GlobalOptions iterations: dict[str, int] @@ -452,20 +456,26 @@ class Scheduler: job_counter: int @staticmethod - def exec_process(task_queue: Queue, done_queue: Queue) -> None: + def exec_process(task_queue: JoinableQueue, done_queue: JoinableQueue) -> None: while True: + print('getting from task_queue') job = task_queue.get() - # print(job) + print('got from task_queue') + print(job) + print('b') if job == 0: task_queue.task_done() break + print('c') job.execute(task_queue, done_queue) + print('d') task_queue.task_done() + print('e') def __init__(self, workers: int, initial_tests: list[FoundryTest], options: GlobalOptions) -> None: self.options = options - self.task_queue = Queue() - self.done_queue = Queue() + self.task_queue = JoinableQueue() + self.done_queue = JoinableQueue() self.servers = {} self.results = [] self.iterations = {} @@ -473,6 +483,7 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Glob self.done_counter = 0 for test in initial_tests: self.servers[test.id] = create_server(self.options) + print('putting into task_queue') self.task_queue.put(InitProofJob(test=test, port=self.servers[test.id].port, 
options=self.options)) self.iterations[test.id] = 0 # print(f'adding InitProofJob {test.id}') @@ -487,14 +498,16 @@ def run(self) -> None: # print('starting thread') thread.start() while self.job_counter > 0: - print('waiting for new job') + print('getting from done_queue') result = self.done_queue.get() + print('got from done_queue') print('got job') print('working') self.job_counter -= 1 if type(result) is AdvanceProofJob: # print('result is AdvanceProofJob') # print(f'pulled AdvanceProofJob {result.test.id} {result.node_id}') + print('putting into task_queue 1') self.task_queue.put(result) # with legacy_explore( # self.options.foundry.kevm, @@ -626,6 +639,7 @@ def run(self) -> None: self.results.append(result.proof) continue + print('putting into task_queue') self.task_queue.put( AdvanceProofJob( test=result.test, @@ -640,6 +654,7 @@ def run(self) -> None: print(ns / 1000000000) for _thread in self.threads: + print('putting into task_queue') self.task_queue.put(0) # print('a') From 47ae2d721b1859f6a7362a1aed219e5bc0b28ccb Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Mon, 16 Oct 2023 20:28:30 -0500 Subject: [PATCH 12/27] Fix not always using booster when booster requested --- src/kontrol/prove.py | 110 ++++++++++++++------ src/tests/integration/test_foundry_prove.py | 2 + 2 files changed, 80 insertions(+), 32 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 6bc21a2c3..8ea0445a2 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -1,7 +1,10 @@ from __future__ import annotations +import linecache import logging +import os import time +import tracemalloc from abc import ABC, abstractmethod from dataclasses import dataclass from subprocess import CalledProcessError @@ -16,8 +19,8 @@ legacy_explore, print_failure_info, ) -from multiprocess import JoinableQueue # type: ignore -from multiprocess import Process # type: ignore +from queue import Queue +from threading import Thread # type: ignore from pathos.pools import ProcessPool # type: ignore from pyk.cterm import CTerm from pyk.kast.inner import KApply, KSequence, KVariable, Subst @@ -80,6 +83,9 @@ def foundry_prove( if max_iterations is not None and max_iterations < 0: raise ValueError(f'Must have a non-negative number of iterations, found: --max-iterations {max_iterations}') + if not use_booster: + exit(3) + if use_booster: try: run_process(('which', 'kore-rpc-booster'), pipe_stderr=True).stdout.strip() @@ -121,6 +127,9 @@ def foundry_prove( def run_prover(test_suite: list[FoundryTest]) -> list[APRProof]: llvm_definition_dir = foundry.llvm_library if use_booster else None + print('abc') + print(llvm_definition_dir) + options = GlobalOptions( foundry=foundry, auto_abstract_gas=auto_abstract_gas, @@ -238,7 +247,7 @@ def collect_setup_methods(foundry: Foundry, contracts: Iterable[Contract] = (), class Job(ABC): @abstractmethod - def execute(self, queue: JoinableQueue, done_queue: JoinableQueue) -> None: + def execute(self, queue: Queue, done_queue: Queue) -> None: ... 
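[editor's note] Patch 11 moved the scheduler onto multiprocess (a fork of the standard library's multiprocessing that serializes with dill), and this patch types the Job API back against queue.Queue and threads. For comparison, here is a minimal process-based version of the same loop, written against the stdlib multiprocessing API that multiprocess mirrors; the main constraint it adds is that everything crossing the queue must be picklable. This is an illustrative sketch, not code from the patch:

from multiprocessing import JoinableQueue, Process


def worker(tasks):
    # Same loop shape as Scheduler.exec_process, with 0 as the shutdown sentinel.
    while True:
        item = tasks.get()
        if item == 0:
            tasks.task_done()
            break
        print(f'processing {item}')
        tasks.task_done()


if __name__ == '__main__':
    tasks = JoinableQueue()
    workers = [Process(target=worker, args=(tasks,)) for _ in range(2)]
    for proc in workers:
        proc.start()
    for item in (1, 2, 3):
        tasks.put(item)
    for _ in workers:
        tasks.put(0)      # one sentinel per worker process
    tasks.join()
    for proc in workers:
        proc.join()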
@@ -258,7 +267,8 @@ def __init__( self.options = options self.port = port - def execute(self, queue: JoinableQueue, done_queue: JoinableQueue) -> None: + def execute(self, queue: Queue, done_queue: Queue) -> None: + print(f'kore_rpc_command = {self.options.kore_rpc_command}') with legacy_explore( self.options.foundry.kevm, kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), @@ -282,6 +292,7 @@ def execute(self, queue: JoinableQueue, done_queue: JoinableQueue) -> None: bmc_depth=self.options.bmc_depth, run_constructor=self.options.run_constructor, ) + print('1234') self.proof.write_proof_data() for pending_node in self.proof.pending: print('putting into done_queue 1') @@ -336,7 +347,8 @@ def __init__( self.options = options self.port = port - def execute(self, queue: JoinableQueue, done_queue: JoinableQueue) -> None: + def execute(self, queue: Queue, done_queue: Queue) -> None: + print(f'kore_rpc_command = {self.options.kore_rpc_command}') with legacy_explore( self.options.foundry.kevm, kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), @@ -402,7 +414,8 @@ def __init__( self.options = options self.extend_result = extend_result - def execute(self, queue: JoinableQueue, done_queue: JoinableQueue) -> None: + def execute(self, queue: Queue, done_queue: Queue) -> None: + print(f'kore_rpc_command = {self.options.kore_rpc_command}') with legacy_explore( self.options.foundry.kevm, kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), @@ -442,12 +455,39 @@ def create_server(options: GlobalOptions) -> KoreServer: ) +def display_top(snapshot: tracemalloc.Snapshot, key_type: str='lineno', limit: int=3) -> None: + snapshot = snapshot.filter_traces( + ( + tracemalloc.Filter(False, ''), + tracemalloc.Filter(False, ''), + ) + ) + top_stats = snapshot.statistics(key_type) + + print('Top %s lines' % limit) + for index, stat in enumerate(top_stats[:limit], 1): + frame = stat.traceback[0] + # replace '/path/to/module/file.py' with 'module/file.py' + filename = os.sep.join(frame.filename.split(os.sep)[-2:]) + print('#%s: %s:%s: %.1f KiB' % (index, filename, frame.lineno, stat.size / 1024)) + line = linecache.getline(frame.filename, frame.lineno).strip() + if line: + print(' %s' % line) + + other = top_stats[limit:] + if other: + size = sum(stat.size for stat in other) + print('%s other: %.1f KiB' % (len(other), size / 1024)) + total = sum(stat.size for stat in top_stats) + print('Total allocated size: %.1f KiB' % (total / 1024)) + + class Scheduler: servers: dict[str, KoreServer] proofs: dict[FoundryTest, APRProof] - threads: list[Process] - task_queue: JoinableQueue - done_queue: JoinableQueue + threads: list[Thread] + task_queue: Queue + done_queue: Queue options: GlobalOptions iterations: dict[str, int] @@ -456,26 +496,25 @@ class Scheduler: job_counter: int @staticmethod - def exec_process(task_queue: JoinableQueue, done_queue: JoinableQueue) -> None: + def exec_process(task_queue: Queue, done_queue: Queue) -> None: + while True: - print('getting from task_queue') + print(' 1') job = task_queue.get() - print('got from task_queue') - print(job) - print('b') + print(' 2') if job == 0: task_queue.task_done() break - print('c') + print(' 3') job.execute(task_queue, done_queue) - print('d') + print(' 4') task_queue.task_done() - print('e') + print(' 5') def __init__(self, workers: int, initial_tests: list[FoundryTest], options: GlobalOptions) -> None: self.options = options - self.task_queue = JoinableQueue() - self.done_queue = 
JoinableQueue() + self.task_queue = Queue() + self.done_queue = Queue() self.servers = {} self.results = [] self.iterations = {} @@ -489,18 +528,22 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Glob # print(f'adding InitProofJob {test.id}') self.job_counter += 1 self.threads = [ - Process(target=Scheduler.exec_process, args=(self.task_queue, self.done_queue), daemon=True) + Thread(target=Scheduler.exec_process, args=(self.task_queue, self.done_queue), daemon=False) for i in range(self.options.workers) ] def run(self) -> None: +# tracemalloc.start() + for thread in self.threads: # print('starting thread') thread.start() while self.job_counter > 0: - print('getting from done_queue') +# print(tracemalloc.get_traced_memory()) + + print(' getting from done_queue') result = self.done_queue.get() - print('got from done_queue') + print(' got from done_queue') print('got job') print('working') self.job_counter -= 1 @@ -544,6 +587,7 @@ def run(self) -> None: print('extending kcfg') result.execute(self.task_queue, self.done_queue) + print(f'kore_rpc_command = {self.options.kore_rpc_command}') with legacy_explore( self.options.foundry.kevm, kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), @@ -665,6 +709,8 @@ def run(self) -> None: for server in self.servers.values(): server.close() +# snapshot = tracemalloc.take_snapshot() +# display_top(snapshot) # print('b') @@ -761,16 +807,16 @@ def _run_cfg_group( ) -> dict[tuple[str, int], tuple[bool, list[str] | None]]: llvm_definition_dir = foundry.llvm_library if use_booster else None - def create_server() -> KoreServer: - return kore_server( - definition_dir=foundry.kevm.definition_dir, - llvm_definition_dir=llvm_definition_dir, - module_name=foundry.kevm.main_module, - command=kore_rpc_command, - bug_report=bug_report, - smt_timeout=smt_timeout, - smt_retry_limit=smt_retry_limit, - ) +# def create_server() -> KoreServer: +# return kore_server( +# definition_dir=foundry.kevm.definition_dir, +# llvm_definition_dir=llvm_definition_dir, +# module_name=foundry.kevm.main_module, +# command=kore_rpc_command, +# bug_report=bug_report, +# smt_timeout=smt_timeout, +# smt_retry_limit=smt_retry_limit, +# ) def init_and_run_proof(test: FoundryTest) -> tuple[bool, list[str] | None]: start_server = port is None diff --git a/src/tests/integration/test_foundry_prove.py b/src/tests/integration/test_foundry_prove.py index 9d3f36ae6..be09cbc87 100644 --- a/src/tests/integration/test_foundry_prove.py +++ b/src/tests/integration/test_foundry_prove.py @@ -133,6 +133,7 @@ def test_foundry_prove( counterexample_info=True, port=server.port, bug_report=bug_report, + use_booster=use_booster, ) # Then @@ -173,6 +174,7 @@ def test_foundry_fail( simplify_init=False, counterexample_info=True, port=server.port, + use_booster=use_booster, ) # Then From bd5d072170ff7e1deca189753f5a64bbbc9671a1 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Mon, 16 Oct 2023 20:29:03 -0500 Subject: [PATCH 13/27] Fix formatting --- src/kontrol/prove.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 8ea0445a2..53783b8f1 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -7,7 +7,9 @@ import tracemalloc from abc import ABC, abstractmethod from dataclasses import dataclass +from queue import Queue from subprocess import CalledProcessError +from threading import Thread # type: ignore from typing import TYPE_CHECKING, NamedTuple from 
kevm_pyk.kevm import KEVM, KEVMSemantics @@ -19,8 +21,6 @@ legacy_explore, print_failure_info, ) -from queue import Queue -from threading import Thread # type: ignore from pathos.pools import ProcessPool # type: ignore from pyk.cterm import CTerm from pyk.kast.inner import KApply, KSequence, KVariable, Subst @@ -455,7 +455,7 @@ def create_server(options: GlobalOptions) -> KoreServer: ) -def display_top(snapshot: tracemalloc.Snapshot, key_type: str='lineno', limit: int=3) -> None: +def display_top(snapshot: tracemalloc.Snapshot, key_type: str = 'lineno', limit: int = 3) -> None: snapshot = snapshot.filter_traces( ( tracemalloc.Filter(False, ''), @@ -497,7 +497,6 @@ class Scheduler: @staticmethod def exec_process(task_queue: Queue, done_queue: Queue) -> None: - while True: print(' 1') job = task_queue.get() @@ -533,13 +532,13 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Glob ] def run(self) -> None: -# tracemalloc.start() + # tracemalloc.start() for thread in self.threads: # print('starting thread') thread.start() while self.job_counter > 0: -# print(tracemalloc.get_traced_memory()) + # print(tracemalloc.get_traced_memory()) print(' getting from done_queue') result = self.done_queue.get() @@ -709,6 +708,7 @@ def run(self) -> None: for server in self.servers.values(): server.close() + # snapshot = tracemalloc.take_snapshot() # display_top(snapshot) @@ -807,16 +807,16 @@ def _run_cfg_group( ) -> dict[tuple[str, int], tuple[bool, list[str] | None]]: llvm_definition_dir = foundry.llvm_library if use_booster else None -# def create_server() -> KoreServer: -# return kore_server( -# definition_dir=foundry.kevm.definition_dir, -# llvm_definition_dir=llvm_definition_dir, -# module_name=foundry.kevm.main_module, -# command=kore_rpc_command, -# bug_report=bug_report, -# smt_timeout=smt_timeout, -# smt_retry_limit=smt_retry_limit, -# ) + # def create_server() -> KoreServer: + # return kore_server( + # definition_dir=foundry.kevm.definition_dir, + # llvm_definition_dir=llvm_definition_dir, + # module_name=foundry.kevm.main_module, + # command=kore_rpc_command, + # bug_report=bug_report, + # smt_timeout=smt_timeout, + # smt_retry_limit=smt_retry_limit, + # ) def init_and_run_proof(test: FoundryTest) -> tuple[bool, list[str] | None]: start_server = port is None From a1b5054b18b0f3b4f242e7cf40b1e1d334e1ef3f Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Mon, 16 Oct 2023 20:35:51 -0500 Subject: [PATCH 14/27] Update poetry.lock --- poetry.lock | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/poetry.lock b/poetry.lock index 5fc4c73f3..a96319108 100644 --- a/poetry.lock +++ b/poetry.lock @@ -736,25 +736,27 @@ dill = ["dill (>=0.3.7)"] [[package]] name = "psutil" -version = "5.9.5" +version = "5.9.6" description = "Cross-platform lib for process and system monitoring in Python." 
optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"}, - {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"}, - {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"}, - {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"}, - {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"}, - {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"}, - {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"}, - {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"}, + {file = "psutil-5.9.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fb8a697f11b0f5994550555fcfe3e69799e5b060c8ecf9e2f75c69302cc35c0d"}, + {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:91ecd2d9c00db9817a4b4192107cf6954addb5d9d67a969a4f436dbc9200f88c"}, + {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:10e8c17b4f898d64b121149afb136c53ea8b68c7531155147867b7b1ac9e7e28"}, + {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:18cd22c5db486f33998f37e2bb054cc62fd06646995285e02a51b1e08da97017"}, + {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:ca2780f5e038379e520281e4c032dddd086906ddff9ef0d1b9dcf00710e5071c"}, + {file = "psutil-5.9.6-cp27-none-win32.whl", hash = "sha256:70cb3beb98bc3fd5ac9ac617a327af7e7f826373ee64c80efd4eb2856e5051e9"}, + {file = "psutil-5.9.6-cp27-none-win_amd64.whl", hash = "sha256:51dc3d54607c73148f63732c727856f5febec1c7c336f8f41fcbd6315cce76ac"}, + {file = "psutil-5.9.6-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c69596f9fc2f8acd574a12d5f8b7b1ba3765a641ea5d60fb4736bf3c08a8214a"}, + {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92e0cc43c524834af53e9d3369245e6cc3b130e78e26100d1f63cdb0abeb3d3c"}, + 
{file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:748c9dd2583ed86347ed65d0035f45fa8c851e8d90354c122ab72319b5f366f4"}, + {file = "psutil-5.9.6-cp36-cp36m-win32.whl", hash = "sha256:3ebf2158c16cc69db777e3c7decb3c0f43a7af94a60d72e87b2823aebac3d602"}, + {file = "psutil-5.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:ff18b8d1a784b810df0b0fff3bcb50ab941c3b8e2c8de5726f9c71c601c611aa"}, + {file = "psutil-5.9.6-cp37-abi3-win32.whl", hash = "sha256:a6f01f03bf1843280f4ad16f4bde26b817847b4c1a0db59bf6419807bc5ce05c"}, + {file = "psutil-5.9.6-cp37-abi3-win_amd64.whl", hash = "sha256:6e5fb8dc711a514da83098bc5234264e551ad980cec5f85dabf4d38ed6f15e9a"}, + {file = "psutil-5.9.6-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:daecbcbd29b289aac14ece28eca6a3e60aa361754cf6da3dfb20d4d32b6c7f57"}, + {file = "psutil-5.9.6.tar.gz", hash = "sha256:e4b92ddcd7dd4cdd3f900180ea1e104932c7bce234fb88976e2a3b296441225a"}, ] [package.extras] @@ -776,13 +778,13 @@ global = ["pybind11-global (==2.11.1)"] [[package]] name = "pycodestyle" -version = "2.11.0" +version = "2.11.1" description = "Python style guide checker" optional = false python-versions = ">=3.8" files = [ - {file = "pycodestyle-2.11.0-py2.py3-none-any.whl", hash = "sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8"}, - {file = "pycodestyle-2.11.0.tar.gz", hash = "sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0"}, + {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"}, + {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"}, ] [[package]] From 8fac206f184710697978114b9ea268352822189c Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Mon, 16 Oct 2023 20:41:03 -0500 Subject: [PATCH 15/27] Fix formatting --- src/kontrol/prove.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 53783b8f1..eb547d1ab 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -469,7 +469,7 @@ def display_top(snapshot: tracemalloc.Snapshot, key_type: str = 'lineno', limit: frame = stat.traceback[0] # replace '/path/to/module/file.py' with 'module/file.py' filename = os.sep.join(frame.filename.split(os.sep)[-2:]) - print('#%s: %s:%s: %.1f KiB' % (index, filename, frame.lineno, stat.size / 1024)) + print(f'#{index}: {filename}:{frame.lineno}: {stat.size / 1024:.1f} KiB') line = linecache.getline(frame.filename, frame.lineno).strip() if line: print(' %s' % line) @@ -477,7 +477,7 @@ def display_top(snapshot: tracemalloc.Snapshot, key_type: str = 'lineno', limit: other = top_stats[limit:] if other: size = sum(stat.size for stat in other) - print('%s other: %.1f KiB' % (len(other), size / 1024)) + print(f'{len(other)} other: {size / 1024:.1f} KiB') total = sum(stat.size for stat in top_stats) print('Total allocated size: %.1f KiB' % (total / 1024)) From 9c198ff8d4e52b040182d34496cdd0ed3e08d869 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Mon, 16 Oct 2023 20:56:09 -0500 Subject: [PATCH 16/27] Remove exit command --- src/kontrol/prove.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index eb547d1ab..3da00da5c 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -83,9 +83,6 @@ def foundry_prove( if max_iterations is not None and max_iterations < 0: raise ValueError(f'Must have a 
non-negative number of iterations, found: --max-iterations {max_iterations}') - if not use_booster: - exit(3) - if use_booster: try: run_process(('which', 'kore-rpc-booster'), pipe_stderr=True).stdout.strip() From a26af5596e703f9f51230296b1a19e65998387b6 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Mon, 16 Oct 2023 22:00:19 -0500 Subject: [PATCH 17/27] Use same port --- src/kontrol/prove.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 3da00da5c..0450e7337 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -149,7 +149,7 @@ def run_prover(test_suite: list[FoundryTest]) -> list[APRProof]: fail_fast=fail_fast, ) - scheduler = Scheduler(workers=workers, initial_tests=test_suite, options=options) + scheduler = Scheduler(workers=workers, initial_tests=test_suite, options=options, port=port) scheduler.run() return scheduler.results @@ -507,19 +507,25 @@ def exec_process(task_queue: Queue, done_queue: Queue) -> None: task_queue.task_done() print(' 5') - def __init__(self, workers: int, initial_tests: list[FoundryTest], options: GlobalOptions) -> None: + def __init__( + self, workers: int, initial_tests: list[FoundryTest], options: GlobalOptions, port: int | None + ) -> None: self.options = options self.task_queue = Queue() self.done_queue = Queue() - self.servers = {} + # self.servers = {} self.results = [] self.iterations = {} self.job_counter = 0 self.done_counter = 0 for test in initial_tests: - self.servers[test.id] = create_server(self.options) + # self.servers[test.id] = create_server(self.options) print('putting into task_queue') - self.task_queue.put(InitProofJob(test=test, port=self.servers[test.id].port, options=self.options)) + # self.task_queue.put(InitProofJob(test=test, port=self.servers[test.id].port, options=self.options)) + if port is None: + server = create_server(self.options) + port = server.port + self.task_queue.put(InitProofJob(test=test, port=port, options=self.options)) self.iterations[test.id] = 0 # print(f'adding InitProofJob {test.id}') self.job_counter += 1 @@ -552,9 +558,6 @@ def run(self) -> None: # self.options.foundry.kevm, # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), # id=result.test.id, - # bug_report=self.options.bug_report, - # kore_rpc_command=self.options.kore_rpc_command, - # llvm_definition_dir=self.options.llvm_definition_dir, # smt_timeout=self.options.smt_timeout, # smt_retry_limit=self.options.smt_retry_limit, # trace_rewrites=self.options.trace_rewrites, @@ -702,8 +705,9 @@ def run(self) -> None: for thread in self.threads: thread.join() - for server in self.servers.values(): - server.close() + +# for server in self.servers.values(): +# server.close() # snapshot = tracemalloc.take_snapshot() From 616c485f15e747d3ff9cbba9349e6a06b6263530 Mon Sep 17 00:00:00 2001 From: devops Date: Wed, 18 Oct 2023 22:51:16 +0000 Subject: [PATCH 18/27] Set Version: 0.1.31 --- package/version | 2 +- pyproject.toml | 2 +- src/kontrol/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package/version b/package/version index 013adb716..db7a48047 100644 --- a/package/version +++ b/package/version @@ -1 +1 @@ -0.1.30 +0.1.31 diff --git a/pyproject.toml b/pyproject.toml index 90e84d995..ba8684456 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "kontrol" -version = "0.1.30" +version = "0.1.31" description = "Foundry 
integration for KEVM" authors = [ "Runtime Verification, Inc. ", diff --git a/src/kontrol/__init__.py b/src/kontrol/__init__.py index f3bac6ec6..c7c9d7be9 100644 --- a/src/kontrol/__init__.py +++ b/src/kontrol/__init__.py @@ -5,4 +5,4 @@ if TYPE_CHECKING: from typing import Final -VERSION: Final = '0.1.30' +VERSION: Final = '0.1.31' From 45c085dab066bd9d7648144bbeedc66af9b5704c Mon Sep 17 00:00:00 2001 From: devops Date: Tue, 24 Oct 2023 01:18:24 +0000 Subject: [PATCH 19/27] Set Version: 0.1.37 --- package/version | 2 +- pyproject.toml | 2 +- src/kontrol/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package/version b/package/version index 072d0fa39..9f42295fc 100644 --- a/package/version +++ b/package/version @@ -1 +1 @@ -0.1.36 +0.1.37 diff --git a/pyproject.toml b/pyproject.toml index 14845e41b..4d331b135 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "kontrol" -version = "0.1.36" +version = "0.1.37" description = "Foundry integration for KEVM" authors = [ "Runtime Verification, Inc. ", diff --git a/src/kontrol/__init__.py b/src/kontrol/__init__.py index 72cbcf8c0..b6abbf12a 100644 --- a/src/kontrol/__init__.py +++ b/src/kontrol/__init__.py @@ -5,4 +5,4 @@ if TYPE_CHECKING: from typing import Final -VERSION: Final = '0.1.36' +VERSION: Final = '0.1.37' From eb2fa59e0d02cabb7f605ea1da36c59844bc297d Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Tue, 24 Oct 2023 15:05:34 -0500 Subject: [PATCH 20/27] Fix job counter not being updated --- src/kontrol/__main__.py | 7 ------ src/kontrol/prove.py | 25 ++++++++++++++++++++- src/tests/integration/test_foundry_prove.py | 3 +++ 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/src/kontrol/__main__.py b/src/kontrol/__main__.py index b8ccbe8a6..ef6b9b264 100644 --- a/src/kontrol/__main__.py +++ b/src/kontrol/__main__.py @@ -537,13 +537,6 @@ def _parse_test_version_tuple(value: str) -> tuple[str, int | None]: action='store_true', help='Include the contract constructor in the test execution.', ) - prove_args.add_argument( - '--fail-fast', - dest='fail_fast', - default=False, - action='store_true', - help='Stop execution on other branches if failing node is detected.', - ) show_args = command_parser.add_parser( 'show', diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index c65998e74..b31b81a79 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -27,9 +27,10 @@ from pyk.prelude.ml import mlEqualsTrue from pyk.proof.proof import Proof from pyk.proof.reachability import APRBMCProof, APRBMCProver, APRProof, APRProver +from pyk.proof.show import APRProofShow from pyk.utils import run_process, unique -from .foundry import Foundry +from .foundry import Foundry, foundry_node_printer from .solc_to_k import Contract if TYPE_CHECKING: @@ -196,6 +197,7 @@ def __init__( def execute(self, queue: Queue, done_queue: Queue) -> None: llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None + print('execute InitProofJob') with legacy_explore( self.foundry.kevm, @@ -256,6 +258,7 @@ def __init__( self.foundry = foundry def execute(self, queue: Queue, done_queue: Queue) -> None: + print('execute AdvanceProofJob') llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None with legacy_explore( self.foundry.kevm, @@ -282,6 +285,7 @@ def execute(self, queue: Queue, done_queue: Queue) -> None: cut_point_rules=cut_point_rules, terminal_rules=terminal_rules, ) + print('Adding 
ExtendKCFGJob') done_queue.put( ExtendKCFGJob( test=self.test, @@ -323,6 +327,8 @@ def __init__( self.foundry = foundry def execute(self, queue: Queue, done_queue: Queue) -> None: + print('execute ExtendKCFGJob') + llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None with legacy_explore( self.foundry.kevm, @@ -409,11 +415,17 @@ def run(self) -> None: for thread in self.threads: thread.start() while self.job_counter > 0: + print('getting Job') result = self.done_queue.get() + print('got Job') self.job_counter -= 1 if type(result) is AdvanceProofJob: + print('got AdvanceProofJob') self.task_queue.put(result) + self.job_counter += 1 elif type(result) is ExtendKCFGJob: + print('got ExtendKCFGJob') + if ( self.options.max_iterations is not None and self.options.max_iterations <= self.iterations[result.test.id] @@ -440,6 +452,13 @@ def run(self) -> None: ) as kcfg_explore: prover = build_prover(options=self.options, proof=result.proof, kcfg_explore=kcfg_explore) prover._check_all_terminals() + + node_printer = foundry_node_printer(self.foundry, result.test.contract.name, result.proof) + proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) + + res_lines = proof_show.show(result.proof) + print('\n'.join(res_lines)) + for node in result.proof.terminal: if result.proof.kcfg.is_leaf(node.id) and not result.proof.is_target(node.id): # TODO can we have a worker thread check subsumtion? @@ -493,6 +512,10 @@ def run(self) -> None: ) ) self.job_counter += 1 + else: + print('Got other') + + print('done') for _thread in self.threads: self.task_queue.put(0) diff --git a/src/tests/integration/test_foundry_prove.py b/src/tests/integration/test_foundry_prove.py index 40d7b5f78..fe938cabf 100644 --- a/src/tests/integration/test_foundry_prove.py +++ b/src/tests/integration/test_foundry_prove.py @@ -138,6 +138,8 @@ def test_foundry_prove( ), ) + print(test_id) + # Then assert_pass(test_id, prove_res) @@ -350,6 +352,7 @@ def test_foundry_remove_node( def assert_pass(test: str, prove_res: list[APRProof]) -> None: + print(prove_res) proof = single([proof for proof in prove_res if proof.id.split(':')[0] == test]) if not proof.passed: assert proof.failure_info From 983da90a8e524f12b42ed093580202bab8b8c1e7 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Tue, 24 Oct 2023 17:54:12 -0500 Subject: [PATCH 21/27] Move writing proof data and collecting proof result to end of extend logic --- src/kontrol/prove.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index b31b81a79..5aad7acd0 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -450,12 +450,18 @@ def run(self) -> None: start_server=False, port=result.port, ) as kcfg_explore: + proof_done = False prover = build_prover(options=self.options, proof=result.proof, kcfg_explore=kcfg_explore) - prover._check_all_terminals() node_printer = foundry_node_printer(self.foundry, result.test.contract.name, result.proof) proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) + res_lines = proof_show.show(result.proof) + print('\n'.join(res_lines)) + prover._check_all_terminals() + + node_printer = foundry_node_printer(self.foundry, result.test.contract.name, result.proof) + proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) res_lines = proof_show.show(result.proof) print('\n'.join(res_lines)) @@ -470,12 +476,12 @@ def run(self) -> None: _LOGGER.warning( f'Terminating proof early because fail_fast is 
set {result.proof.id}, failing nodes: {[nd.id for nd in result.proof.failing]}' ) - self.results.append(result.proof) - continue - result.proof.write_proof_data() + proof_done = True if not result.proof.pending: - self.results.append(result.proof) + proof_done = True + for pending_node in result.proof.pending: + print(f'checking pending for node {pending_node.id}') if pending_node not in result.proof.kcfg.reachable_nodes(source_id=result.node_id): continue @@ -498,7 +504,6 @@ def run(self) -> None: _LOGGER.info(f'Prior loop heads for node {result.proof.id}: {(node.id, prior_loops)}') if len(prior_loops) > result.proof.bmc_depth: result.proof.add_bounded(node.id) - self.results.append(result.proof) continue self.task_queue.put( @@ -512,6 +517,10 @@ def run(self) -> None: ) ) self.job_counter += 1 + + result.proof.write_proof_data() + if proof_done: + self.results.append(result.proof) else: print('Got other') From 03a2307864f9c0b401ef74f737265c5f953ea0da Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Tue, 24 Oct 2023 21:05:13 -0500 Subject: [PATCH 22/27] Move syncing proof logic to separate function, create one server per proof and shut them down when done --- src/kontrol/prove.py | 219 +++++++++++++++++++++++-------------------- 1 file changed, 119 insertions(+), 100 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 5aad7acd0..4d097bcd6 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -372,6 +372,7 @@ class Scheduler: done_queue: Queue options: ProveOptions iterations: dict[str, int] + kcfg_explore_map: dict[str, KCFGExplore] foundry: Foundry results: list[APRProof] @@ -397,20 +398,129 @@ def __init__( self.foundry = foundry self.results = [] self.iterations = {} + self.servers = {} + self.kcfg_explore_map = {} self.job_counter = 0 self.done_counter = 0 for test in initial_tests: - if port is None: - server = create_server(self.options, foundry=foundry) - port = server.port + # if port is None: + # server = create_server(self.options, foundry=foundry) + # port = server.port + server = create_server(self.options, foundry=foundry) + port = server.port + self.servers[test.id] = server self.task_queue.put(InitProofJob(test=test, port=port, options=self.options, foundry=foundry)) self.iterations[test.id] = 0 + # self.kcfg_explore_map[test.id] = legacy_explore( + # self.foundry.kevm, + # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + # id=test.id, + # bug_report=self.options.bug_report, + # kore_rpc_command=self.options.kore_rpc_command, + # llvm_definition_dir=llvm_definition_dir, + # smt_timeout=self.options.smt_timeout, + # smt_retry_limit=self.options.smt_retry_limit, + # trace_rewrites=self.options.trace_rewrites, + # start_server=False, + # port=job.port, + # ) self.job_counter += 1 self.threads = [ Thread(target=Scheduler.exec_process, args=(self.task_queue, self.done_queue), daemon=False) for i in range(self.options.workers) ] + def sync_proof(self, job: ExtendKCFGJob) -> bool: + if self.options.max_iterations is not None and self.options.max_iterations <= self.iterations[job.test.id]: + _LOGGER.warning(f'Reached iteration bound {job.proof.id}: {self.options.max_iterations}') + return True + self.iterations[job.test.id] += 1 + + job.execute(self.task_queue, self.done_queue) + llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None + + with legacy_explore( + self.foundry.kevm, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=job.test.id, + 
bug_report=self.options.bug_report, + kore_rpc_command=self.options.kore_rpc_command, + llvm_definition_dir=llvm_definition_dir, + smt_timeout=self.options.smt_timeout, + smt_retry_limit=self.options.smt_retry_limit, + trace_rewrites=self.options.trace_rewrites, + start_server=False, + port=job.port, + ) as kcfg_explore: + prover = build_prover(options=self.options, proof=job.proof, kcfg_explore=kcfg_explore) + + node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) + proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) + res_lines = proof_show.show(job.proof) + print('\n'.join(res_lines)) + + prover._check_all_terminals() + + node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) + proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) + res_lines = proof_show.show(job.proof) + print('\n'.join(res_lines)) + + for node in job.proof.terminal: + if job.proof.kcfg.is_leaf(node.id) and not job.proof.is_target(node.id): + # TODO can we have a worker thread check subsumtion? + prover._check_subsume(node) + + if job.proof.failed: + prover.save_failure_info() + if self.options.fail_fast and job.proof.failed: + _LOGGER.warning( + f'Terminating proof early because fail_fast is set {job.proof.id}, failing nodes: {[nd.id for nd in job.proof.failing]}' + ) + return True + if not job.proof.pending: + return True + + for pending_node in job.proof.pending: + print(f'checking pending for node {pending_node.id}') + if pending_node not in job.proof.kcfg.reachable_nodes(source_id=job.node_id): + continue + + if type(job.proof) is APRBMCProof: + node = job.proof.kcfg.node(pending_node.id) + + _LOGGER.info(f'Checking bmc depth for node {job.proof.id}: {pending_node.id}') + _prior_loops = [ + succ.source.id + for succ in job.proof.shortest_path_to(pending_node.id) + if kcfg_explore.kcfg_semantics.same_loop(succ.source.cterm, node.cterm) + ] + prior_loops: list[int] = [] + for _pl in _prior_loops: + if not ( + job.proof.kcfg.zero_depth_between(_pl, node.id) + or any(job.proof.kcfg.zero_depth_between(_pl, pl) for pl in prior_loops) + ): + prior_loops.append(_pl) + _LOGGER.info(f'Prior loop heads for node {job.proof.id}: {(node.id, prior_loops)}') + if len(prior_loops) > job.proof.bmc_depth: + job.proof.add_bounded(node.id) + continue + + self.task_queue.put( + AdvanceProofJob( + test=job.test, + node_id=pending_node.id, + proof=job.proof, + options=self.options, + port=job.port, + foundry=self.foundry, + ) + ) + self.job_counter += 1 + + return False + def run(self) -> None: for thread in self.threads: thread.start() @@ -425,110 +535,19 @@ def run(self) -> None: self.job_counter += 1 elif type(result) is ExtendKCFGJob: print('got ExtendKCFGJob') - - if ( - self.options.max_iterations is not None - and self.options.max_iterations <= self.iterations[result.test.id] - ): - _LOGGER.warning(f'Reached iteration bound {result.proof.id}: {self.options.max_iterations}') - break - self.iterations[result.test.id] += 1 - - result.execute(self.task_queue, self.done_queue) - llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None - - with legacy_explore( - self.foundry.kevm, - kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - id=result.test.id, - bug_report=self.options.bug_report, - kore_rpc_command=self.options.kore_rpc_command, - llvm_definition_dir=llvm_definition_dir, - smt_timeout=self.options.smt_timeout, - smt_retry_limit=self.options.smt_retry_limit, - 
trace_rewrites=self.options.trace_rewrites, - start_server=False, - port=result.port, - ) as kcfg_explore: - proof_done = False - prover = build_prover(options=self.options, proof=result.proof, kcfg_explore=kcfg_explore) - - node_printer = foundry_node_printer(self.foundry, result.test.contract.name, result.proof) - proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) - res_lines = proof_show.show(result.proof) - print('\n'.join(res_lines)) - - prover._check_all_terminals() - - node_printer = foundry_node_printer(self.foundry, result.test.contract.name, result.proof) - proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) - res_lines = proof_show.show(result.proof) - print('\n'.join(res_lines)) - - for node in result.proof.terminal: - if result.proof.kcfg.is_leaf(node.id) and not result.proof.is_target(node.id): - # TODO can we have a worker thread check subsumtion? - prover._check_subsume(node) - - if result.proof.failed: - prover.save_failure_info() - if self.options.fail_fast and result.proof.failed: - _LOGGER.warning( - f'Terminating proof early because fail_fast is set {result.proof.id}, failing nodes: {[nd.id for nd in result.proof.failing]}' - ) - proof_done = True - if not result.proof.pending: - proof_done = True - - for pending_node in result.proof.pending: - print(f'checking pending for node {pending_node.id}') - if pending_node not in result.proof.kcfg.reachable_nodes(source_id=result.node_id): - continue - - if type(result.proof) is APRBMCProof: - node = result.proof.kcfg.node(pending_node.id) - - _LOGGER.info(f'Checking bmc depth for node {result.proof.id}: {pending_node.id}') - _prior_loops = [ - succ.source.id - for succ in result.proof.shortest_path_to(pending_node.id) - if kcfg_explore.kcfg_semantics.same_loop(succ.source.cterm, node.cterm) - ] - prior_loops: list[int] = [] - for _pl in _prior_loops: - if not ( - result.proof.kcfg.zero_depth_between(_pl, node.id) - or any(result.proof.kcfg.zero_depth_between(_pl, pl) for pl in prior_loops) - ): - prior_loops.append(_pl) - _LOGGER.info(f'Prior loop heads for node {result.proof.id}: {(node.id, prior_loops)}') - if len(prior_loops) > result.proof.bmc_depth: - result.proof.add_bounded(node.id) - continue - - self.task_queue.put( - AdvanceProofJob( - test=result.test, - node_id=pending_node.id, - proof=result.proof, - options=self.options, - port=result.port, - foundry=self.foundry, - ) - ) - self.job_counter += 1 - + proof_done = self.sync_proof(result) + if proof_done: + self.results.append(result.proof) result.proof.write_proof_data() - if proof_done: - self.results.append(result.proof) - else: - print('Got other') print('done') for _thread in self.threads: self.task_queue.put(0) + for server in self.servers.values(): + server.close() + self.task_queue.join() for thread in self.threads: thread.join() From ef8c504cbd3c33a55a52e67909e3c330e019cc05 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Tue, 24 Oct 2023 21:45:33 -0500 Subject: [PATCH 23/27] Don't start servers if port is specified --- src/kontrol/prove.py | 13 ++++++------- src/tests/integration/test_foundry_prove.py | 3 --- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 4d097bcd6..f28ff750b 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -390,7 +390,7 @@ def exec_process(task_queue: Queue, done_queue: Queue) -> None: task_queue.task_done() def __init__( - self, workers: int, initial_tests: list[FoundryTest], options: ProveOptions, port: int | None, 
foundry: Foundry + self, workers: int, initial_tests: list[FoundryTest], options: ProveOptions, foundry: Foundry ) -> None: self.options = options self.task_queue = Queue() @@ -403,12 +403,11 @@ def __init__( self.job_counter = 0 self.done_counter = 0 for test in initial_tests: - # if port is None: - # server = create_server(self.options, foundry=foundry) - # port = server.port - server = create_server(self.options, foundry=foundry) - port = server.port - self.servers[test.id] = server + port = options.port + if port is None: + server = create_server(self.options, foundry=foundry) + port = server.port + self.servers[test.id] = server self.task_queue.put(InitProofJob(test=test, port=port, options=self.options, foundry=foundry)) self.iterations[test.id] = 0 # self.kcfg_explore_map[test.id] = legacy_explore( diff --git a/src/tests/integration/test_foundry_prove.py b/src/tests/integration/test_foundry_prove.py index fe938cabf..40d7b5f78 100644 --- a/src/tests/integration/test_foundry_prove.py +++ b/src/tests/integration/test_foundry_prove.py @@ -138,8 +138,6 @@ def test_foundry_prove( ), ) - print(test_id) - # Then assert_pass(test_id, prove_res) @@ -352,7 +350,6 @@ def test_foundry_remove_node( def assert_pass(test: str, prove_res: list[APRProof]) -> None: - print(prove_res) proof = single([proof for proof in prove_res if proof.id.split(':')[0] == test]) if not proof.passed: assert proof.failure_info From fc16c012dc58130ad54678cf8b20df36d7f5d1fc Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Tue, 24 Oct 2023 21:52:01 -0500 Subject: [PATCH 24/27] Fix port --- src/kontrol/prove.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index f28ff750b..3e742fe1d 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -97,9 +97,7 @@ def foundry_prove( test.method.update_digest(foundry.digest_file) def run_prover(test_suite: list[FoundryTest]) -> list[APRProof]: - scheduler = Scheduler( - workers=options.workers, initial_tests=test_suite, options=options, port=options.port, foundry=foundry - ) + scheduler = Scheduler(workers=options.workers, initial_tests=test_suite, options=options, foundry=foundry) scheduler.run() return scheduler.results @@ -389,9 +387,7 @@ def exec_process(task_queue: Queue, done_queue: Queue) -> None: job.execute(task_queue, done_queue) task_queue.task_done() - def __init__( - self, workers: int, initial_tests: list[FoundryTest], options: ProveOptions, foundry: Foundry - ) -> None: + def __init__(self, workers: int, initial_tests: list[FoundryTest], options: ProveOptions, foundry: Foundry) -> None: self.options = options self.task_queue = Queue() self.done_queue = Queue() From 92e280e92669ec7b7e472898268effb682d23f07 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Tue, 24 Oct 2023 22:52:59 -0500 Subject: [PATCH 25/27] Share kcfg_explore per proof --- src/kontrol/prove.py | 391 +++++++++++++++++++++++-------------------- 1 file changed, 207 insertions(+), 184 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 3e742fe1d..24c58c227 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -19,8 +19,8 @@ from pyk.cterm import CTerm from pyk.kast.inner import KApply, KSequence, KVariable, Subst from pyk.kast.manip import flatten_label, set_cell -from pyk.kcfg import KCFG -from pyk.kore.rpc import kore_server +from pyk.kcfg import KCFG, KCFGExplore +from pyk.kore.rpc import KoreClient, kore_server from pyk.prelude.k import GENERATED_TOP_CELL from pyk.prelude.kbool import FALSE, 
notBool from pyk.prelude.kint import intToken @@ -39,7 +39,6 @@ from typing import Final from pyk.kast.inner import KInner - from pyk.kcfg import KCFGExplore from pyk.kcfg.explore import ExtendResult from pyk.kore.rpc import KoreServer @@ -180,6 +179,7 @@ class InitProofJob(Job): options: ProveOptions foundry: Foundry port: int + kcfg_explore: KCFGExplore def __init__( self, @@ -187,48 +187,51 @@ def __init__( options: ProveOptions, foundry: Foundry, port: int, + kcfg_explore: KCFGExplore, ) -> None: self.test = test self.options = options self.port = port self.foundry = foundry + self.kcfg_explore = kcfg_explore def execute(self, queue: Queue, done_queue: Queue) -> None: - llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None + # llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None print('execute InitProofJob') - with legacy_explore( - self.foundry.kevm, - kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - id=self.test.id, - bug_report=self.options.bug_report, - kore_rpc_command=self.options.kore_rpc_command, - llvm_definition_dir=llvm_definition_dir, - smt_timeout=self.options.smt_timeout, - smt_retry_limit=self.options.smt_retry_limit, - trace_rewrites=self.options.trace_rewrites, - start_server=False, - port=self.port, - ) as kcfg_explore: - self.proof = method_to_apr_proof( - test=self.test, - foundry=self.foundry, - kcfg_explore=kcfg_explore, - bmc_depth=self.options.bmc_depth, - run_constructor=self.options.run_constructor, - ) - self.proof.write_proof_data() - for pending_node in self.proof.pending: - done_queue.put( - AdvanceProofJob( - test=self.test, - node_id=pending_node.id, - proof=self.proof, - options=self.options, - port=self.port, - foundry=self.foundry, - ) + # with legacy_explore( + # self.foundry.kevm, + # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + # id=self.test.id, + # bug_report=self.options.bug_report, + # kore_rpc_command=self.options.kore_rpc_command, + # llvm_definition_dir=llvm_definition_dir, + # smt_timeout=self.options.smt_timeout, + # smt_retry_limit=self.options.smt_retry_limit, + # trace_rewrites=self.options.trace_rewrites, + # start_server=False, + # port=self.port, + # ) as kcfg_explore: + self.proof = method_to_apr_proof( + test=self.test, + foundry=self.foundry, + kcfg_explore=self.kcfg_explore, + bmc_depth=self.options.bmc_depth, + run_constructor=self.options.run_constructor, + ) + self.proof.write_proof_data() + for pending_node in self.proof.pending: + done_queue.put( + AdvanceProofJob( + test=self.test, + node_id=pending_node.id, + proof=self.proof, + options=self.options, + port=self.port, + foundry=self.foundry, + kcfg_explore=self.kcfg_explore, ) + ) class AdvanceProofJob(Job): @@ -238,6 +241,7 @@ class AdvanceProofJob(Job): node_id: int port: int foundry: Foundry + kcfg_explore: KCFGExplore def __init__( self, @@ -247,6 +251,7 @@ def __init__( options: ProveOptions, port: int, foundry: Foundry, + kcfg_explore: KCFGExplore, ) -> None: self.test = test self.node_id = node_id @@ -254,47 +259,49 @@ def __init__( self.options = options self.port = port self.foundry = foundry + self.kcfg_explore = kcfg_explore def execute(self, queue: Queue, done_queue: Queue) -> None: print('execute AdvanceProofJob') - llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None - with legacy_explore( - self.foundry.kevm, - kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - 
id=self.test.id, - bug_report=self.options.bug_report, - kore_rpc_command=self.options.kore_rpc_command, - llvm_definition_dir=llvm_definition_dir, - smt_timeout=self.options.smt_timeout, - smt_retry_limit=self.options.smt_retry_limit, - trace_rewrites=self.options.trace_rewrites, - start_server=False, - port=self.port, - ) as kcfg_explore: - curr_node = self.proof.kcfg.node(self.node_id) - terminal_rules = build_terminal_rules(options=self.options) - cut_point_rules = build_cut_point_rules(options=self.options) - - cterm = curr_node.cterm - - extend_result = kcfg_explore.extend_cterm( - cterm, - execute_depth=self.options.max_depth, - cut_point_rules=cut_point_rules, - terminal_rules=terminal_rules, - ) - print('Adding ExtendKCFGJob') - done_queue.put( - ExtendKCFGJob( - test=self.test, - proof=self.proof, - node_id=curr_node.id, - port=self.port, - extend_result=extend_result, - options=self.options, - foundry=self.foundry, - ) + # llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None + # with legacy_explore( + # self.foundry.kevm, + # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + # id=self.test.id, + # bug_report=self.options.bug_report, + # kore_rpc_command=self.options.kore_rpc_command, + # llvm_definition_dir=llvm_definition_dir, + # smt_timeout=self.options.smt_timeout, + # smt_retry_limit=self.options.smt_retry_limit, + # trace_rewrites=self.options.trace_rewrites, + # start_server=False, + # port=self.port, + # ) as kcfg_explore: + curr_node = self.proof.kcfg.node(self.node_id) + terminal_rules = build_terminal_rules(options=self.options) + cut_point_rules = build_cut_point_rules(options=self.options) + + cterm = curr_node.cterm + + extend_result = self.kcfg_explore.extend_cterm( + cterm, + execute_depth=self.options.max_depth, + cut_point_rules=cut_point_rules, + terminal_rules=terminal_rules, + ) + print('Adding ExtendKCFGJob') + done_queue.put( + ExtendKCFGJob( + test=self.test, + proof=self.proof, + node_id=curr_node.id, + port=self.port, + extend_result=extend_result, + options=self.options, + foundry=self.foundry, + kcfg_explore=self.kcfg_explore, ) + ) class ExtendKCFGJob(Job): @@ -305,6 +312,7 @@ class ExtendKCFGJob(Job): port: int extend_result: ExtendResult foundry: Foundry + kcfg_explore: KCFGExplore def __init__( self, @@ -315,6 +323,7 @@ def __init__( extend_result: ExtendResult, options: ProveOptions, foundry: Foundry, + kcfg_explore: KCFGExplore, ) -> None: self.test = test self.proof = proof @@ -323,30 +332,31 @@ def __init__( self.options = options self.extend_result = extend_result self.foundry = foundry + self.kcfg_explore = kcfg_explore def execute(self, queue: Queue, done_queue: Queue) -> None: print('execute ExtendKCFGJob') - llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None - with legacy_explore( - self.foundry.kevm, - kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - id=self.test.id, - bug_report=self.options.bug_report, - kore_rpc_command=self.options.kore_rpc_command, - llvm_definition_dir=llvm_definition_dir, - smt_timeout=self.options.smt_timeout, - smt_retry_limit=self.options.smt_retry_limit, - trace_rewrites=self.options.trace_rewrites, - start_server=False, - port=self.port, - ) as kcfg_explore: - kcfg_explore.extend_kcfg( - self.extend_result, - kcfg=self.proof.kcfg, - node=self.proof.kcfg.node(self.node_id), - logs=self.proof.logs, - ) + # llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster 
else None + # with legacy_explore( + # self.foundry.kevm, + # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + # id=self.test.id, + # bug_report=self.options.bug_report, + # kore_rpc_command=self.options.kore_rpc_command, + # llvm_definition_dir=llvm_definition_dir, + # smt_timeout=self.options.smt_timeout, + # smt_retry_limit=self.options.smt_retry_limit, + # trace_rewrites=self.options.trace_rewrites, + # start_server=False, + # port=self.port, + # ) as kcfg_explore: + self.kcfg_explore.extend_kcfg( + self.extend_result, + kcfg=self.proof.kcfg, + node=self.proof.kcfg.node(self.node_id), + logs=self.proof.logs, + ) def create_server(options: ProveOptions, foundry: Foundry) -> KoreServer: @@ -364,6 +374,7 @@ def create_server(options: ProveOptions, foundry: Foundry) -> KoreServer: class Scheduler: servers: dict[str, KoreServer] + clients: dict[str, KoreClient] proofs: dict[FoundryTest, APRProof] threads: list[Thread] task_queue: Queue @@ -371,6 +382,7 @@ class Scheduler: options: ProveOptions iterations: dict[str, int] kcfg_explore_map: dict[str, KCFGExplore] + foundry: Foundry results: list[APRProof] @@ -395,6 +407,7 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Prov self.results = [] self.iterations = {} self.servers = {} + self.clients = {} self.kcfg_explore_map = {} self.job_counter = 0 self.done_counter = 0 @@ -404,21 +417,27 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Prov server = create_server(self.options, foundry=foundry) port = server.port self.servers[test.id] = server - self.task_queue.put(InitProofJob(test=test, port=port, options=self.options, foundry=foundry)) + + client = KoreClient('localhost', port, bug_report=self.options.bug_report) + self.clients[test.id] = client + self.iterations[test.id] = 0 - # self.kcfg_explore_map[test.id] = legacy_explore( - # self.foundry.kevm, - # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - # id=test.id, - # bug_report=self.options.bug_report, - # kore_rpc_command=self.options.kore_rpc_command, - # llvm_definition_dir=llvm_definition_dir, - # smt_timeout=self.options.smt_timeout, - # smt_retry_limit=self.options.smt_retry_limit, - # trace_rewrites=self.options.trace_rewrites, - # start_server=False, - # port=job.port, - # ) + self.kcfg_explore_map[test.id] = KCFGExplore( + kprint=self.foundry.kevm, + kore_client=client, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=test.id, + trace_rewrites=options.trace_rewrites, + ) + self.task_queue.put( + InitProofJob( + test=test, + port=port, + options=self.options, + foundry=foundry, + kcfg_explore=self.kcfg_explore_map[test.id], + ) + ) self.job_counter += 1 self.threads = [ Thread(target=Scheduler.exec_process, args=(self.task_queue, self.done_queue), daemon=False) @@ -432,89 +451,91 @@ def sync_proof(self, job: ExtendKCFGJob) -> bool: self.iterations[job.test.id] += 1 job.execute(self.task_queue, self.done_queue) - llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None - - with legacy_explore( - self.foundry.kevm, - kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - id=job.test.id, - bug_report=self.options.bug_report, - kore_rpc_command=self.options.kore_rpc_command, - llvm_definition_dir=llvm_definition_dir, - smt_timeout=self.options.smt_timeout, - smt_retry_limit=self.options.smt_retry_limit, - trace_rewrites=self.options.trace_rewrites, - 
start_server=False, - port=job.port, - ) as kcfg_explore: - prover = build_prover(options=self.options, proof=job.proof, kcfg_explore=kcfg_explore) - - node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) - proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) - res_lines = proof_show.show(job.proof) - print('\n'.join(res_lines)) - - prover._check_all_terminals() - - node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) - proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) - res_lines = proof_show.show(job.proof) - print('\n'.join(res_lines)) - - for node in job.proof.terminal: - if job.proof.kcfg.is_leaf(node.id) and not job.proof.is_target(node.id): - # TODO can we have a worker thread check subsumtion? - prover._check_subsume(node) - - if job.proof.failed: - prover.save_failure_info() - if self.options.fail_fast and job.proof.failed: - _LOGGER.warning( - f'Terminating proof early because fail_fast is set {job.proof.id}, failing nodes: {[nd.id for nd in job.proof.failing]}' - ) - return True - if not job.proof.pending: - return True + # llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None + + # with legacy_explore( + # self.foundry.kevm, + # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + # id=job.test.id, + # bug_report=self.options.bug_report, + # kore_rpc_command=self.options.kore_rpc_command, + # llvm_definition_dir=llvm_definition_dir, + # smt_timeout=self.options.smt_timeout, + # smt_retry_limit=self.options.smt_retry_limit, + # trace_rewrites=self.options.trace_rewrites, + # start_server=False, + # port=job.port, + # ) as kcfg_explore: + kcfg_explore = self.kcfg_explore_map[job.test.id] + prover = build_prover(options=self.options, proof=job.proof, kcfg_explore=kcfg_explore) + + node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) + proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) + res_lines = proof_show.show(job.proof) + print('\n'.join(res_lines)) + + prover._check_all_terminals() + + node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) + proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) + res_lines = proof_show.show(job.proof) + print('\n'.join(res_lines)) + + for node in job.proof.terminal: + if job.proof.kcfg.is_leaf(node.id) and not job.proof.is_target(node.id): + # TODO can we have a worker thread check subsumtion? 
+ prover._check_subsume(node) + + if job.proof.failed: + prover.save_failure_info() + if self.options.fail_fast and job.proof.failed: + _LOGGER.warning( + f'Terminating proof early because fail_fast is set {job.proof.id}, failing nodes: {[nd.id for nd in job.proof.failing]}' + ) + return True + if not job.proof.pending: + return True - for pending_node in job.proof.pending: - print(f'checking pending for node {pending_node.id}') - if pending_node not in job.proof.kcfg.reachable_nodes(source_id=job.node_id): + for pending_node in job.proof.pending: + print(f'checking pending for node {pending_node.id}') + if pending_node not in job.proof.kcfg.reachable_nodes(source_id=job.node_id): + continue + + if type(job.proof) is APRBMCProof: + node = job.proof.kcfg.node(pending_node.id) + + _LOGGER.info(f'Checking bmc depth for node {job.proof.id}: {pending_node.id}') + _prior_loops = [ + succ.source.id + for succ in job.proof.shortest_path_to(pending_node.id) + if kcfg_explore.kcfg_semantics.same_loop(succ.source.cterm, node.cterm) + ] + prior_loops: list[int] = [] + for _pl in _prior_loops: + if not ( + job.proof.kcfg.zero_depth_between(_pl, node.id) + or any(job.proof.kcfg.zero_depth_between(_pl, pl) for pl in prior_loops) + ): + prior_loops.append(_pl) + _LOGGER.info(f'Prior loop heads for node {job.proof.id}: {(node.id, prior_loops)}') + if len(prior_loops) > job.proof.bmc_depth: + job.proof.add_bounded(node.id) continue - if type(job.proof) is APRBMCProof: - node = job.proof.kcfg.node(pending_node.id) - - _LOGGER.info(f'Checking bmc depth for node {job.proof.id}: {pending_node.id}') - _prior_loops = [ - succ.source.id - for succ in job.proof.shortest_path_to(pending_node.id) - if kcfg_explore.kcfg_semantics.same_loop(succ.source.cterm, node.cterm) - ] - prior_loops: list[int] = [] - for _pl in _prior_loops: - if not ( - job.proof.kcfg.zero_depth_between(_pl, node.id) - or any(job.proof.kcfg.zero_depth_between(_pl, pl) for pl in prior_loops) - ): - prior_loops.append(_pl) - _LOGGER.info(f'Prior loop heads for node {job.proof.id}: {(node.id, prior_loops)}') - if len(prior_loops) > job.proof.bmc_depth: - job.proof.add_bounded(node.id) - continue - - self.task_queue.put( - AdvanceProofJob( - test=job.test, - node_id=pending_node.id, - proof=job.proof, - options=self.options, - port=job.port, - foundry=self.foundry, - ) + self.task_queue.put( + AdvanceProofJob( + test=job.test, + node_id=pending_node.id, + proof=job.proof, + options=self.options, + port=job.port, + foundry=self.foundry, + kcfg_explore=self.kcfg_explore_map[job.test.id], ) - self.job_counter += 1 + ) + self.job_counter += 1 - return False + return False def run(self) -> None: for thread in self.threads: @@ -542,6 +563,8 @@ def run(self) -> None: for server in self.servers.values(): server.close() + for client in self.clients.values(): + client.close() self.task_queue.join() for thread in self.threads: From d27141c0b655812c764253eb6f0ddf48615f16a7 Mon Sep 17 00:00:00 2001 From: Noah Watson Date: Wed, 25 Oct 2023 19:39:26 -0500 Subject: [PATCH 26/27] Revert using shared kcfg_explore --- src/kontrol/prove.py | 387 ++++++++++++++++++++----------------------- 1 file changed, 183 insertions(+), 204 deletions(-) diff --git a/src/kontrol/prove.py b/src/kontrol/prove.py index 24c58c227..a3a04df38 100644 --- a/src/kontrol/prove.py +++ b/src/kontrol/prove.py @@ -19,8 +19,8 @@ from pyk.cterm import CTerm from pyk.kast.inner import KApply, KSequence, KVariable, Subst from pyk.kast.manip import flatten_label, set_cell -from pyk.kcfg import KCFG, 
KCFGExplore -from pyk.kore.rpc import KoreClient, kore_server +from pyk.kcfg import KCFG +from pyk.kore.rpc import kore_server from pyk.prelude.k import GENERATED_TOP_CELL from pyk.prelude.kbool import FALSE, notBool from pyk.prelude.kint import intToken @@ -34,6 +34,7 @@ from .solc_to_k import Contract if TYPE_CHECKING: + from pyk.kcfg import KCFGExplore from collections.abc import Iterable from pathlib import Path from typing import Final @@ -179,7 +180,6 @@ class InitProofJob(Job): options: ProveOptions foundry: Foundry port: int - kcfg_explore: KCFGExplore def __init__( self, @@ -187,51 +187,48 @@ def __init__( options: ProveOptions, foundry: Foundry, port: int, - kcfg_explore: KCFGExplore, ) -> None: self.test = test self.options = options self.port = port self.foundry = foundry - self.kcfg_explore = kcfg_explore def execute(self, queue: Queue, done_queue: Queue) -> None: - # llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None + llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None print('execute InitProofJob') - # with legacy_explore( - # self.foundry.kevm, - # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - # id=self.test.id, - # bug_report=self.options.bug_report, - # kore_rpc_command=self.options.kore_rpc_command, - # llvm_definition_dir=llvm_definition_dir, - # smt_timeout=self.options.smt_timeout, - # smt_retry_limit=self.options.smt_retry_limit, - # trace_rewrites=self.options.trace_rewrites, - # start_server=False, - # port=self.port, - # ) as kcfg_explore: - self.proof = method_to_apr_proof( - test=self.test, - foundry=self.foundry, - kcfg_explore=self.kcfg_explore, - bmc_depth=self.options.bmc_depth, - run_constructor=self.options.run_constructor, - ) - self.proof.write_proof_data() - for pending_node in self.proof.pending: - done_queue.put( - AdvanceProofJob( - test=self.test, - node_id=pending_node.id, - proof=self.proof, - options=self.options, - port=self.port, - foundry=self.foundry, - kcfg_explore=self.kcfg_explore, - ) + with legacy_explore( + self.foundry.kevm, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=self.test.id, + bug_report=self.options.bug_report, + kore_rpc_command=self.options.kore_rpc_command, + llvm_definition_dir=llvm_definition_dir, + smt_timeout=self.options.smt_timeout, + smt_retry_limit=self.options.smt_retry_limit, + trace_rewrites=self.options.trace_rewrites, + start_server=False, + port=self.port, + ) as kcfg_explore: + self.proof = method_to_apr_proof( + test=self.test, + foundry=self.foundry, + kcfg_explore=kcfg_explore, + bmc_depth=self.options.bmc_depth, + run_constructor=self.options.run_constructor, ) + self.proof.write_proof_data() + for pending_node in self.proof.pending: + done_queue.put( + AdvanceProofJob( + test=self.test, + node_id=pending_node.id, + proof=self.proof, + options=self.options, + port=self.port, + foundry=self.foundry, + ) + ) class AdvanceProofJob(Job): @@ -241,7 +238,6 @@ class AdvanceProofJob(Job): node_id: int port: int foundry: Foundry - kcfg_explore: KCFGExplore def __init__( self, @@ -251,7 +247,6 @@ def __init__( options: ProveOptions, port: int, foundry: Foundry, - kcfg_explore: KCFGExplore, ) -> None: self.test = test self.node_id = node_id @@ -259,49 +254,47 @@ def __init__( self.options = options self.port = port self.foundry = foundry - self.kcfg_explore = kcfg_explore def execute(self, queue: Queue, done_queue: Queue) -> None: print('execute AdvanceProofJob') - # 
llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None - # with legacy_explore( - # self.foundry.kevm, - # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - # id=self.test.id, - # bug_report=self.options.bug_report, - # kore_rpc_command=self.options.kore_rpc_command, - # llvm_definition_dir=llvm_definition_dir, - # smt_timeout=self.options.smt_timeout, - # smt_retry_limit=self.options.smt_retry_limit, - # trace_rewrites=self.options.trace_rewrites, - # start_server=False, - # port=self.port, - # ) as kcfg_explore: - curr_node = self.proof.kcfg.node(self.node_id) - terminal_rules = build_terminal_rules(options=self.options) - cut_point_rules = build_cut_point_rules(options=self.options) - - cterm = curr_node.cterm - - extend_result = self.kcfg_explore.extend_cterm( - cterm, - execute_depth=self.options.max_depth, - cut_point_rules=cut_point_rules, - terminal_rules=terminal_rules, - ) - print('Adding ExtendKCFGJob') - done_queue.put( - ExtendKCFGJob( - test=self.test, - proof=self.proof, - node_id=curr_node.id, - port=self.port, - extend_result=extend_result, - options=self.options, - foundry=self.foundry, - kcfg_explore=self.kcfg_explore, + llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None + with legacy_explore( + self.foundry.kevm, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=self.test.id, + bug_report=self.options.bug_report, + kore_rpc_command=self.options.kore_rpc_command, + llvm_definition_dir=llvm_definition_dir, + smt_timeout=self.options.smt_timeout, + smt_retry_limit=self.options.smt_retry_limit, + trace_rewrites=self.options.trace_rewrites, + start_server=False, + port=self.port, + ) as kcfg_explore: + curr_node = self.proof.kcfg.node(self.node_id) + terminal_rules = build_terminal_rules(options=self.options) + cut_point_rules = build_cut_point_rules(options=self.options) + + cterm = curr_node.cterm + + extend_result = kcfg_explore.extend_cterm( + cterm, + execute_depth=self.options.max_depth, + cut_point_rules=cut_point_rules, + terminal_rules=terminal_rules, + ) + print('Adding ExtendKCFGJob') + done_queue.put( + ExtendKCFGJob( + test=self.test, + proof=self.proof, + node_id=curr_node.id, + port=self.port, + extend_result=extend_result, + options=self.options, + foundry=self.foundry, + ) ) - ) class ExtendKCFGJob(Job): @@ -312,7 +305,6 @@ class ExtendKCFGJob(Job): port: int extend_result: ExtendResult foundry: Foundry - kcfg_explore: KCFGExplore def __init__( self, @@ -323,7 +315,6 @@ def __init__( extend_result: ExtendResult, options: ProveOptions, foundry: Foundry, - kcfg_explore: KCFGExplore, ) -> None: self.test = test self.proof = proof @@ -332,31 +323,30 @@ def __init__( self.options = options self.extend_result = extend_result self.foundry = foundry - self.kcfg_explore = kcfg_explore def execute(self, queue: Queue, done_queue: Queue) -> None: print('execute ExtendKCFGJob') - # llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None - # with legacy_explore( - # self.foundry.kevm, - # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - # id=self.test.id, - # bug_report=self.options.bug_report, - # kore_rpc_command=self.options.kore_rpc_command, - # llvm_definition_dir=llvm_definition_dir, - # smt_timeout=self.options.smt_timeout, - # smt_retry_limit=self.options.smt_retry_limit, - # trace_rewrites=self.options.trace_rewrites, - # start_server=False, - # port=self.port, - # ) 
as kcfg_explore: - self.kcfg_explore.extend_kcfg( - self.extend_result, - kcfg=self.proof.kcfg, - node=self.proof.kcfg.node(self.node_id), - logs=self.proof.logs, - ) + llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None + with legacy_explore( + self.foundry.kevm, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=self.test.id, + bug_report=self.options.bug_report, + kore_rpc_command=self.options.kore_rpc_command, + llvm_definition_dir=llvm_definition_dir, + smt_timeout=self.options.smt_timeout, + smt_retry_limit=self.options.smt_retry_limit, + trace_rewrites=self.options.trace_rewrites, + start_server=False, + port=self.port, + ) as kcfg_explore: + kcfg_explore.extend_kcfg( + self.extend_result, + kcfg=self.proof.kcfg, + node=self.proof.kcfg.node(self.node_id), + logs=self.proof.logs, + ) def create_server(options: ProveOptions, foundry: Foundry) -> KoreServer: @@ -374,14 +364,14 @@ def create_server(options: ProveOptions, foundry: Foundry) -> KoreServer: class Scheduler: servers: dict[str, KoreServer] - clients: dict[str, KoreClient] + clients: dict[str, KoreServer] proofs: dict[FoundryTest, APRProof] threads: list[Thread] task_queue: Queue done_queue: Queue options: ProveOptions iterations: dict[str, int] - kcfg_explore_map: dict[str, KCFGExplore] +# kcfg_explore_map: dict[str, KCFGExplore] foundry: Foundry @@ -407,8 +397,7 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Prov self.results = [] self.iterations = {} self.servers = {} - self.clients = {} - self.kcfg_explore_map = {} +# self.kcfg_explore_map = {} self.job_counter = 0 self.done_counter = 0 for test in initial_tests: @@ -418,26 +407,18 @@ def __init__(self, workers: int, initial_tests: list[FoundryTest], options: Prov port = server.port self.servers[test.id] = server - client = KoreClient('localhost', port, bug_report=self.options.bug_report) - self.clients[test.id] = client +# client = KoreClient('localhost', port, bug_report=bug_report) +# self.clients[test.id] = client + self.task_queue.put(InitProofJob(test=test, port=port, options=self.options, foundry=foundry)) self.iterations[test.id] = 0 - self.kcfg_explore_map[test.id] = KCFGExplore( - kprint=self.foundry.kevm, - kore_client=client, - kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - id=test.id, - trace_rewrites=options.trace_rewrites, - ) - self.task_queue.put( - InitProofJob( - test=test, - port=port, - options=self.options, - foundry=foundry, - kcfg_explore=self.kcfg_explore_map[test.id], - ) - ) +# self.kcfg_explore_map[test.id] = KCFGExplore( +# kprint=self.foundry.kevm, +# kore_client=client, +# kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), +# id=test.id, +# trace_rewrites=options.trace_rewrites, +# ) self.job_counter += 1 self.threads = [ Thread(target=Scheduler.exec_process, args=(self.task_queue, self.done_queue), daemon=False) @@ -450,92 +431,90 @@ def sync_proof(self, job: ExtendKCFGJob) -> bool: return True self.iterations[job.test.id] += 1 - job.execute(self.task_queue, self.done_queue) - # llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None - - # with legacy_explore( - # self.foundry.kevm, - # kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), - # id=job.test.id, - # bug_report=self.options.bug_report, - # kore_rpc_command=self.options.kore_rpc_command, - # llvm_definition_dir=llvm_definition_dir, - # 
smt_timeout=self.options.smt_timeout, - # smt_retry_limit=self.options.smt_retry_limit, - # trace_rewrites=self.options.trace_rewrites, - # start_server=False, - # port=job.port, - # ) as kcfg_explore: - kcfg_explore = self.kcfg_explore_map[job.test.id] - prover = build_prover(options=self.options, proof=job.proof, kcfg_explore=kcfg_explore) - - node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) - proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) - res_lines = proof_show.show(job.proof) - print('\n'.join(res_lines)) - - prover._check_all_terminals() - - node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) - proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) - res_lines = proof_show.show(job.proof) - print('\n'.join(res_lines)) - - for node in job.proof.terminal: - if job.proof.kcfg.is_leaf(node.id) and not job.proof.is_target(node.id): - # TODO can we have a worker thread check subsumtion? - prover._check_subsume(node) - - if job.proof.failed: - prover.save_failure_info() - if self.options.fail_fast and job.proof.failed: - _LOGGER.warning( - f'Terminating proof early because fail_fast is set {job.proof.id}, failing nodes: {[nd.id for nd in job.proof.failing]}' - ) - return True - if not job.proof.pending: - return True + llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None +# llvm_definition_dir = self.foundry.llvm_library if self.options.use_booster else None + with legacy_explore( + self.foundry.kevm, + kcfg_semantics=KEVMSemantics(auto_abstract_gas=self.options.auto_abstract_gas), + id=job.test.id, + bug_report=self.options.bug_report, + kore_rpc_command=self.options.kore_rpc_command, + llvm_definition_dir=llvm_definition_dir, + smt_timeout=self.options.smt_timeout, + smt_retry_limit=self.options.smt_retry_limit, + trace_rewrites=self.options.trace_rewrites, + start_server=False, + port=job.port, + ) as kcfg_explore: + prover = build_prover(options=self.options, proof=job.proof, kcfg_explore=kcfg_explore) + + node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) + proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) + res_lines = proof_show.show(job.proof) + print('\n'.join(res_lines)) + + prover._check_all_terminals() + + node_printer = foundry_node_printer(self.foundry, job.test.contract.name, job.proof) + proof_show = APRProofShow(self.foundry.kevm, node_printer=node_printer) + res_lines = proof_show.show(job.proof) + print('\n'.join(res_lines)) + + for node in job.proof.terminal: + if job.proof.kcfg.is_leaf(node.id) and not job.proof.is_target(node.id): + # TODO can we have a worker thread check subsumtion? 
+ prover._check_subsume(node) + + if job.proof.failed: + prover.save_failure_info() + if self.options.fail_fast and job.proof.failed: + _LOGGER.warning( + f'Terminating proof early because fail_fast is set {job.proof.id}, failing nodes: {[nd.id for nd in job.proof.failing]}' + ) + return True + if not job.proof.pending: + return True - for pending_node in job.proof.pending: - print(f'checking pending for node {pending_node.id}') - if pending_node not in job.proof.kcfg.reachable_nodes(source_id=job.node_id): - continue - - if type(job.proof) is APRBMCProof: - node = job.proof.kcfg.node(pending_node.id) - - _LOGGER.info(f'Checking bmc depth for node {job.proof.id}: {pending_node.id}') - _prior_loops = [ - succ.source.id - for succ in job.proof.shortest_path_to(pending_node.id) - if kcfg_explore.kcfg_semantics.same_loop(succ.source.cterm, node.cterm) - ] - prior_loops: list[int] = [] - for _pl in _prior_loops: - if not ( - job.proof.kcfg.zero_depth_between(_pl, node.id) - or any(job.proof.kcfg.zero_depth_between(_pl, pl) for pl in prior_loops) - ): - prior_loops.append(_pl) - _LOGGER.info(f'Prior loop heads for node {job.proof.id}: {(node.id, prior_loops)}') - if len(prior_loops) > job.proof.bmc_depth: - job.proof.add_bounded(node.id) + for pending_node in job.proof.pending: + print(f'checking pending for node {pending_node.id}') + if pending_node not in job.proof.kcfg.reachable_nodes(source_id=job.node_id): continue - self.task_queue.put( - AdvanceProofJob( - test=job.test, - node_id=pending_node.id, - proof=job.proof, - options=self.options, - port=job.port, - foundry=self.foundry, - kcfg_explore=self.kcfg_explore_map[job.test.id], + if type(job.proof) is APRBMCProof: + node = job.proof.kcfg.node(pending_node.id) + + _LOGGER.info(f'Checking bmc depth for node {job.proof.id}: {pending_node.id}') + _prior_loops = [ + succ.source.id + for succ in job.proof.shortest_path_to(pending_node.id) + if kcfg_explore.kcfg_semantics.same_loop(succ.source.cterm, node.cterm) + ] + prior_loops: list[int] = [] + for _pl in _prior_loops: + if not ( + job.proof.kcfg.zero_depth_between(_pl, node.id) + or any(job.proof.kcfg.zero_depth_between(_pl, pl) for pl in prior_loops) + ): + prior_loops.append(_pl) + _LOGGER.info(f'Prior loop heads for node {job.proof.id}: {(node.id, prior_loops)}') + if len(prior_loops) > job.proof.bmc_depth: + job.proof.add_bounded(node.id) + continue + + self.task_queue.put( + AdvanceProofJob( + test=job.test, + node_id=pending_node.id, + proof=job.proof, + options=self.options, + port=job.port, + foundry=self.foundry, + ) ) - ) + self.job_counter += 1 self.job_counter += 1 - return False + return False def run(self) -> None: for thread in self.threads: From 640e55e3786ea62a93c8587230bacbd33fc26495 Mon Sep 17 00:00:00 2001 From: devops Date: Mon, 30 Oct 2023 20:54:50 +0000 Subject: [PATCH 27/27] Set Version: 0.1.44 --- package/version | 2 +- pyproject.toml | 2 +- src/kontrol/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package/version b/package/version index 278cbc8db..6871b9e7a 100644 --- a/package/version +++ b/package/version @@ -1 +1 @@ -0.1.43 +0.1.44 diff --git a/pyproject.toml b/pyproject.toml index d0d0488f4..ee74a5554 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "kontrol" -version = "0.1.43" +version = "0.1.44" description = "Foundry integration for KEVM" authors = [ "Runtime Verification, Inc. 
", diff --git a/src/kontrol/__init__.py b/src/kontrol/__init__.py index 413bd5787..b4b89deb9 100644 --- a/src/kontrol/__init__.py +++ b/src/kontrol/__init__.py @@ -5,4 +5,4 @@ if TYPE_CHECKING: from typing import Final -VERSION: Final = '0.1.43' +VERSION: Final = '0.1.44'