From b69955383192d864cc394b851e114f3d685a8ab0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Agust=C3=ADn=20Borgna?= <121866228+aborgna-q@users.noreply.github.com> Date: Wed, 4 Oct 2023 11:19:33 +0200 Subject: [PATCH 1/3] chore: Small code cleanups (#158) - Remove duplicated tk1 writer fn - Better .dot debug API --- src/json.rs | 9 ++++++--- src/utils.rs | 17 +++++++++++++++-- taso-optimiser/src/main.rs | 14 ++------------ 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/src/json.rs b/src/json.rs index c08afba5..f6f6f94f 100644 --- a/src/json.rs +++ b/src/json.rs @@ -129,21 +129,24 @@ pub fn load_tk1_json_str(json: &str) -> Result { } /// Save a circuit to file in TK1 JSON format. -pub fn save_tk1_json_file(path: impl AsRef, circ: &Hugr) -> Result<(), TK1ConvertError> { +pub fn save_tk1_json_file( + circ: &impl Circuit, + path: impl AsRef, +) -> Result<(), TK1ConvertError> { let file = fs::File::create(path)?; let writer = io::BufWriter::new(file); save_tk1_json_writer(circ, writer) } /// Save a circuit in TK1 JSON format to a writer. -pub fn save_tk1_json_writer(circ: &Hugr, w: impl io::Write) -> Result<(), TK1ConvertError> { +pub fn save_tk1_json_writer(circ: &impl Circuit, w: impl io::Write) -> Result<(), TK1ConvertError> { let serial_circ = SerialCircuit::encode(circ)?; serde_json::to_writer(w, &serial_circ)?; Ok(()) } /// Save a circuit in TK1 JSON format to a String. -pub fn save_tk1_json_str(circ: &Hugr) -> Result { +pub fn save_tk1_json_str(circ: &impl Circuit) -> Result { let mut buf = io::BufWriter::new(Vec::new()); save_tk1_json_writer(circ, &mut buf)?; let bytes = buf.into_inner().unwrap(); diff --git a/src/utils.rs b/src/utils.rs index 124cda37..22456624 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -36,13 +36,26 @@ pub(crate) fn build_simple_circuit( #[allow(dead_code)] #[cfg(test)] pub(crate) mod test { + #[allow(unused_imports)] + use hugr::HugrView; + /// Open a browser page to render a dot string graph. 
/// /// This can be used directly on the output of `Hugr::dot_string` + /// + /// Only for use in local testing. Will fail to compile on CI. #[cfg(not(ci_run))] - pub(crate) fn viz_dotstr(dotstr: &str) { + pub(crate) fn viz_dotstr(dotstr: impl AsRef) { let mut base: String = "https://dreampuf.github.io/GraphvizOnline/#".into(); - base.push_str(&urlencoding::encode(dotstr)); + base.push_str(&urlencoding::encode(dotstr.as_ref())); webbrowser::open(&base).unwrap(); } + + /// Open a browser page to render a HugrView's dot string graph. + /// + /// Only for use in local testing. Will fail to compile on CI. + #[cfg(not(ci_run))] + pub(crate) fn viz_hugr(hugr: &impl HugrView) { + viz_dotstr(hugr.dot_string()); + } } diff --git a/taso-optimiser/src/main.rs b/taso-optimiser/src/main.rs index c31afb0d..1d06c646 100644 --- a/taso-optimiser/src/main.rs +++ b/taso-optimiser/src/main.rs @@ -10,11 +10,9 @@ use std::path::PathBuf; use std::process::exit; use clap::Parser; -use hugr::Hugr; -use tket2::json::{load_tk1_json_file, TKETDecode}; +use tket2::json::{load_tk1_json_file, save_tk1_json_file}; use tket2::optimiser::taso::log::TasoLogger; use tket2::optimiser::TasoOptimiser; -use tket_json_rs::circuit_json::SerialCircuit; #[cfg(feature = "peak_alloc")] use peak_alloc::PeakAlloc; @@ -82,14 +80,6 @@ struct CmdLineArgs { n_threads: Option, } -fn save_tk1_json_file(path: impl AsRef, circ: &Hugr) -> Result<(), std::io::Error> { - let file = File::create(path)?; - let writer = BufWriter::new(file); - let serial_circ = SerialCircuit::encode(circ).unwrap(); - serde_json::to_writer_pretty(writer, &serial_circ)?; - Ok(()) -} - fn main() -> Result<(), Box> { let opts = CmdLineArgs::parse(); @@ -129,7 +119,7 @@ fn main() -> Result<(), Box> { let opt_circ = optimiser.optimise_with_log(&circ, taso_logger, opts.timeout, n_threads); println!("Saving result"); - save_tk1_json_file(output_path, &opt_circ)?; + save_tk1_json_file(&opt_circ, output_path)?; #[cfg(feature = "peak_alloc")] 
println!("Peak memory usage: {} GB", PEAK_ALLOC.peak_usage_as_gb()); From 796323c15a8273d998c50c2acebc85e9889d0b55 Mon Sep 17 00:00:00 2001 From: Luca Mondada <72734770+lmondada@users.noreply.github.com> Date: Wed, 4 Oct 2023 12:19:39 +0200 Subject: [PATCH 2/3] chore: Move hashing to queueing thread (#156) --- src/optimiser/taso.rs | 133 ++++++++--------- src/optimiser/taso/hugr_pchannel.rs | 214 +++++++++++++++++++++------- src/optimiser/taso/hugr_pqueue.rs | 20 ++- src/optimiser/taso/worker.rs | 2 +- 4 files changed, 237 insertions(+), 132 deletions(-) diff --git a/src/optimiser/taso.rs b/src/optimiser/taso.rs index 7fc4c9b0..a6045a43 100644 --- a/src/optimiser/taso.rs +++ b/src/optimiser/taso.rs @@ -20,17 +20,17 @@ mod worker; use crossbeam_channel::select; pub use eq_circ_class::{load_eccs_json_file, EqCircClass}; +use fxhash::FxHashSet; pub use log::TasoLogger; use std::fmt; use std::num::NonZeroUsize; use std::time::{Duration, Instant}; -use fxhash::FxHashSet; use hugr::Hugr; use crate::circuit::CircuitHash; -use crate::optimiser::taso::hugr_pchannel::HugrPriorityChannel; +use crate::optimiser::taso::hugr_pchannel::{HugrPriorityChannel, PriorityChannelLog}; use crate::optimiser::taso::hugr_pqueue::{Entry, HugrPQ}; use crate::optimiser::taso::worker::TasoWorker; use crate::rewrite::strategy::RewriteStrategy; @@ -81,7 +81,10 @@ where /// Run the TASO optimiser on a circuit. /// /// A timeout (in seconds) can be provided. 
- pub fn optimise(&self, circ: &Hugr, timeout: Option, n_threads: NonZeroUsize) -> Hugr { + pub fn optimise(&self, circ: &Hugr, timeout: Option, n_threads: NonZeroUsize) -> Hugr + where + S::Cost: Send + Sync + Clone, + { self.optimise_with_log(circ, Default::default(), timeout, n_threads) } @@ -94,7 +97,10 @@ where log_config: TasoLogger, timeout: Option, n_threads: NonZeroUsize, - ) -> Hugr { + ) -> Hugr + where + S::Cost: Send + Sync + Clone, + { match n_threads.get() { 1 => self.taso(circ, log_config, timeout), _ => self.taso_multithreaded(circ, log_config, timeout, n_threads), @@ -110,7 +116,8 @@ where logger.log_best(&best_circ_cost); // Hash of seen circuits. Dot not store circuits as this map gets huge - let mut seen_hashes: FxHashSet<_> = FromIterator::from_iter([(circ.circuit_hash())]); + let mut seen_hashes = FxHashSet::default(); + seen_hashes.insert(circ.circuit_hash()); // The priority queue of circuits to be processed (this should not get big) const PRIORITY_QUEUE_CAPACITY: usize = 10_000; @@ -133,13 +140,14 @@ where let rewrites = self.rewriter.get_rewrites(&circ); for new_circ in self.strategy.apply_rewrites(rewrites, &circ) { let new_circ_hash = new_circ.circuit_hash(); - circ_cnt += 1; logger.log_progress(circ_cnt, Some(pq.len()), seen_hashes.len()); - if seen_hashes.contains(&new_circ_hash) { + if !seen_hashes.insert(new_circ_hash) { + // Ignore this circuit: we've already seen it continue; } - pq.push_with_hash_unchecked(new_circ, new_circ_hash); - seen_hashes.insert(new_circ_hash); + circ_cnt += 1; + let new_circ_cost = self.cost(&new_circ); + pq.push_unchecked(new_circ, new_circ_hash, new_circ_cost); } if pq.len() >= PRIORITY_QUEUE_CAPACITY { @@ -170,7 +178,10 @@ where mut logger: TasoLogger, timeout: Option, n_threads: NonZeroUsize, - ) -> Hugr { + ) -> Hugr + where + S::Cost: Send + Sync + Clone, + { let n_threads: usize = n_threads.get(); const PRIORITY_QUEUE_CAPACITY: usize = 10_000; @@ -179,51 +190,36 @@ where let strategy = 
self.strategy.clone(); move |circ: &'_ Hugr| strategy.circuit_cost(circ) }; - let (tx_work, rx_work) = - HugrPriorityChannel::init(cost_fn, PRIORITY_QUEUE_CAPACITY * n_threads); - // channel for sending circuits from threads back to main - let (tx_result, rx_result) = crossbeam_channel::unbounded(); + let mut pq = HugrPriorityChannel::init(cost_fn, PRIORITY_QUEUE_CAPACITY * n_threads); let initial_circ_hash = circ.circuit_hash(); let mut best_circ = circ.clone(); let mut best_circ_cost = self.cost(&best_circ); - logger.log_best(&best_circ_cost); - - // Hash of seen circuits. Dot not store circuits as this map gets huge - let mut seen_hashes: FxHashSet<_> = FromIterator::from_iter([(initial_circ_hash)]); // Each worker waits for circuits to scan for rewrites using all the // patterns and sends the results back to main. let joins: Vec<_> = (0..n_threads) .map(|i| { TasoWorker::spawn( - rx_work.clone(), - tx_result.clone(), + pq.pop.clone().unwrap(), + pq.push.clone().unwrap(), self.rewriter.clone(), self.strategy.clone(), Some(format!("taso-worker-{i}")), ) }) .collect(); - // Drop our copy of the worker channels, so we don't count as a - // connected worker. - drop(rx_work); - drop(tx_result); // Queue the initial circuit - tx_work + pq.push + .as_ref() + .unwrap() .send(vec![(initial_circ_hash, circ.clone())]) .unwrap(); + // Drop our copy of the priority queue channels, so we don't count as a + // connected worker. + pq.drop_pop_push(); - // A counter of circuits seen. - let mut circ_cnt = 1; - - // A counter of jobs sent to the workers. - #[allow(unused)] - let mut jobs_sent = 0usize; - // A counter of completed jobs received from the workers. - #[allow(unused)] - let mut jobs_completed = 0usize; // TODO: Report dropped jobs in the queue, so we can check for termination. 
// Deadline for the optimisation timeout @@ -232,66 +228,51 @@ where Some(t) => crossbeam_channel::at(Instant::now() + Duration::from_secs(t)), }; - // Process worker results until we have seen all the circuits, or we run - // out of time. + // Main loop: log best circuits as they come in from the priority queue, + // until the timeout is reached. let mut timeout_flag = false; loop { select! { - recv(rx_result) -> msg => { + recv(pq.log) -> msg => { match msg { - Ok(hashed_circs) => { - let send_result = tracing::trace_span!(target: "taso::metrics", "recv_result").in_scope(|| { - jobs_completed += 1; - for (circ_hash, circ) in &hashed_circs { - circ_cnt += 1; - logger.log_progress(circ_cnt, None, seen_hashes.len()); - if seen_hashes.contains(circ_hash) { - continue; - } - seen_hashes.insert(*circ_hash); - - let cost = self.cost(circ); - - // Check if we got a new best circuit - if cost < best_circ_cost { - best_circ = circ.clone(); - best_circ_cost = cost; - logger.log_best(&best_circ_cost); - } - jobs_sent += 1; - } - // Fill the workqueue with data from pq - tx_work.send(hashed_circs) - }); - if send_result.is_err() { - eprintln!("All our workers panicked. Stopping optimisation."); - break; - } - - // If there is no more data to process, we are done. - // - // TODO: Report dropped jobs in the workers, so we can check for termination. - //if jobs_sent == jobs_completed { - // break 'main; - //}; + Ok(PriorityChannelLog::NewBestCircuit(circ, cost)) => { + best_circ = circ; + best_circ_cost = cost; + logger.log_best(&best_circ_cost); }, + Ok(PriorityChannelLog::CircuitCount(circuit_cnt, seen_cnt)) => { + logger.log_progress(circuit_cnt, None, seen_cnt); + } Err(crossbeam_channel::RecvError) => { - eprintln!("All our workers panicked. Stopping optimisation."); + eprintln!("Priority queue panicked. 
Stopping optimisation."); break; } } } recv(timeout_event) -> _ => { timeout_flag = true; + pq.timeout(); break; } } } - logger.log_processing_end(circ_cnt, best_circ_cost, true, timeout_flag); + // Empty the log from the priority queue and store final circuit count. + let mut circuit_cnt = None; + while let Ok(log) = pq.log.recv() { + match log { + PriorityChannelLog::NewBestCircuit(circ, cost) => { + best_circ = circ; + best_circ_cost = cost; + logger.log_best(&best_circ_cost); + } + PriorityChannelLog::CircuitCount(circ_cnt, _) => { + circuit_cnt = Some(circ_cnt); + } + } + } + logger.log_processing_end(circuit_cnt.unwrap_or(0), best_circ_cost, true, timeout_flag); - // Drop the channel so the threads know to stop. - drop(tx_work); joins.into_iter().for_each(|j| j.join().unwrap()); best_circ diff --git a/src/optimiser/taso/hugr_pchannel.rs b/src/optimiser/taso/hugr_pchannel.rs index 1ec0d2e4..2e1751fa 100644 --- a/src/optimiser/taso/hugr_pchannel.rs +++ b/src/optimiser/taso/hugr_pchannel.rs @@ -1,9 +1,9 @@ //! A multi-producer multi-consumer min-priority channel of Hugrs. -use std::marker::PhantomData; use std::thread; use crossbeam_channel::{select, Receiver, Sender}; +use fxhash::FxHashSet; use hugr::Hugr; use super::hugr_pqueue::{Entry, HugrPQ}; @@ -13,83 +13,201 @@ use super::hugr_pqueue::{Entry, HugrPQ}; /// Queues hugrs using a cost function `C` that produces priority values `P`. /// /// Uses a thread internally to orchestrate the queueing. -pub struct HugrPriorityChannel { - _phantom: PhantomData<(P, C)>, +pub(super) struct HugrPriorityChannel { + // Channels to add and remove circuits from the queue. + push: Receiver>, + pop: Sender<(u64, Hugr)>, + // Outbound channel to log to main thread. + log: Sender>, + // Inbound channel to be terminated. + timeout: Receiver<()>, + // The queue capacity. Queue size is halved when it exceeds this. + queue_capacity: usize, + // The priority queue data structure. + pq: HugrPQ, + // The set of hashes we've seen. 
+ seen_hashes: FxHashSet<u64>, + // The minimum cost we've seen. + min_cost: Option<P>
, + // The number of circuits we've seen (for logging). + circ_cnt: usize, } -pub type Item = (u64, Hugr); +pub(super) type Item = (u64, Hugr); -impl HugrPriorityChannel +/// Logging information from the priority channel. +pub(super) enum PriorityChannelLog { + NewBestCircuit(Hugr, C), + CircuitCount(usize, usize), +} + +/// Channels for communication with the priority channel. +pub(super) struct PriorityChannelCommunication { + pub(super) push: Option>>, + pub(super) pop: Option>, + pub(super) log: Receiver>, + timeout: Sender<()>, +} + +impl PriorityChannelCommunication { + /// Send Timeout signal to the priority channel. + pub(super) fn timeout(&self) { + self.timeout.send(()).unwrap(); + } + + /// Close the local copies of the push and pop channels. + pub(super) fn drop_pop_push(&mut self) { + self.pop = None; + self.push = None; + } +} + +impl HugrPriorityChannel where C: Fn(&Hugr) -> P + Send + Sync + 'static, + P: Ord + Send + Sync + Clone + 'static, { /// Initialize the queueing system. /// - /// Get back a channel on which to queue hugrs with their hash, and - /// a channel on which to receive the output. - pub fn init(cost_fn: C, queue_capacity: usize) -> (Sender>, Receiver) { - let (ins, inr) = crossbeam_channel::unbounded(); - let (outs, outr) = crossbeam_channel::bounded(0); - Self::run(inr, outs, cost_fn, queue_capacity); - (ins, outr) + /// Start the Hugr priority queue in a new thread. + /// + /// Get back channels for communication with the priority queue + /// - push/pop channels for adding and removing circuits to/from the queue, + /// - a channel on which to receive logging information, and + /// - a channel on which to send a timeout signal. + pub(super) fn init(cost_fn: C, queue_capacity: usize) -> PriorityChannelCommunication
<P>
{ + // channels for pushing and popping circuits from pqueue + let (tx_push, rx_push) = crossbeam_channel::unbounded(); + let (tx_pop, rx_pop) = crossbeam_channel::bounded(0); + // channels for communication with main (logging, minimum circuits and timeout) + let (tx_log, rx_log) = crossbeam_channel::unbounded(); + let (tx_timeout, rx_timeout) = crossbeam_channel::bounded(0); + let pq = + HugrPriorityChannel::new(rx_push, tx_pop, tx_log, rx_timeout, cost_fn, queue_capacity); + pq.run(); + PriorityChannelCommunication { + push: Some(tx_push), + pop: Some(rx_pop), + log: rx_log, + timeout: tx_timeout, + } } - /// Run the queuer as a thread. - fn run( - in_channel_orig: Receiver>, - out_channel_orig: Sender<(u64, Hugr)>, + fn new( + push: Receiver>, + pop: Sender<(u64, Hugr)>, + log: Sender>, + timeout: Receiver<()>, cost_fn: C, queue_capacity: usize, - ) { + ) -> Self { + // The priority queue, local to this thread. + let pq = HugrPQ::with_capacity(cost_fn, queue_capacity); + // The set of hashes we've seen. + let seen_hashes = FxHashSet::default(); + // The minimum cost we've seen. + let min_cost = None; + // The number of circuits we've seen (for logging). + let circ_cnt = 0; + + HugrPriorityChannel { + push, + pop, + log, + timeout, + queue_capacity, + pq, + seen_hashes, + min_cost, + circ_cnt, + } + } + + /// Run the queuer as a thread. + fn run(mut self) { let builder = thread::Builder::new().name("priority queueing".into()); - let in_channel = in_channel_orig.clone(); - let out_channel = out_channel_orig.clone(); let _ = builder .name("priority-channel".into()) .spawn(move || { - // The priority queue, local to this thread. - let mut pq: HugrPQ = - HugrPQ::with_capacity(cost_fn, queue_capacity); - loop { - if pq.is_empty() { - // Nothing queued to go out. Wait for input. - match in_channel.recv() { - Ok(new_circs) => { - for (hash, circ) in new_circs { - pq.push_with_hash_unchecked(circ, hash); - } - } - // The sender has closed the channel, we can stop. 
- Err(_) => break, - } + if self.pq.is_empty() { + let Ok(new_circs) = self.push.recv() else { + // The senders have closed the channel, we can stop. + break; + }; + self.enqueue_circs(new_circs); } else { select! { - recv(in_channel) -> result => { - match result { - Ok(new_circs) => { - for (hash, circ) in new_circs { - pq.push_with_hash_unchecked(circ, hash); - } - } - // The sender has closed the channel, we can stop. - Err(_) => break, - } + recv(self.push) -> result => { + let Ok(new_circs) = result else { + // The senders have closed the channel, we can stop. + break; + }; + self.enqueue_circs(new_circs); } - send(out_channel, {let Entry {hash, circ, ..} = pq.pop().unwrap(); (hash, circ)}) -> result => { + send(self.pop, {let Entry {hash, circ, ..} = self.pq.pop().unwrap(); (hash, circ)}) -> result => { match result { Ok(()) => {}, // The receivers have closed the channel, we can stop. Err(_) => break, } } + recv(self.timeout) -> _ => { + // We've timed out. + break + } } } - if pq.len() >= queue_capacity { - pq.truncate(queue_capacity / 2); - } } + // Send a last set of logs before terminating. + self.log + .send(PriorityChannelLog::CircuitCount( + self.circ_cnt, + self.seen_hashes.len(), + )) + .unwrap(); }) .unwrap(); } + + /// Add circuits to queue. + #[tracing::instrument(target = "taso::metrics", skip(self, circs))] + fn enqueue_circs(&mut self, circs: Vec<(u64, Hugr)>) { + for (hash, circ) in circs { + let cost = (self.pq.cost_fn)(&circ); + if !self.seen_hashes.insert(hash) { + // Ignore this circuit: we've seen it before. + continue; + } + + // A new best circuit + if self.min_cost.is_none() || Some(&cost) < self.min_cost.as_ref() { + self.min_cost = Some(cost.clone()); + self.log + .send(PriorityChannelLog::NewBestCircuit( + circ.clone(), + cost.clone(), + )) + .unwrap(); + } + + self.circ_cnt += 1; + self.pq.push_unchecked(circ, hash, cost); + + // Send logs every 1000 circuits. 
+ if self.circ_cnt % 1000 == 0 { + // TODO: Add a minimum time between logs + self.log + .send(PriorityChannelLog::CircuitCount( + self.circ_cnt, + self.seen_hashes.len(), + )) + .unwrap(); + } + } + // If the queue got too big, truncate it. + if self.pq.len() >= self.queue_capacity { + self.pq.truncate(self.queue_capacity / 2); + } + } } diff --git a/src/optimiser/taso/hugr_pqueue.rs b/src/optimiser/taso/hugr_pqueue.rs index f362b5dc..e3ae8c27 100644 --- a/src/optimiser/taso/hugr_pqueue.rs +++ b/src/optimiser/taso/hugr_pqueue.rs @@ -13,7 +13,7 @@ use crate::circuit::CircuitHash; pub(super) struct HugrPQ { queue: DoublePriorityQueue, hash_lookup: FxHashMap, - cost_fn: C, + pub(super) cost_fn: C, } pub(super) struct Entry { @@ -51,20 +51,20 @@ impl HugrPQ { C: Fn(&Hugr) -> P, { let hash = hugr.circuit_hash(); - self.push_with_hash_unchecked(hugr, hash); + let cost = (self.cost_fn)(&hugr); + self.push_unchecked(hugr, hash, cost); } - /// Push a Hugr into the queue with a precomputed hash. + /// Push a Hugr into the queue with a precomputed hash and cost. /// - /// This is useful to avoid recomputing the hash in [`HugrPQ::push`] when - /// it is already known. + /// This is useful to avoid recomputing the hash and cost function in + /// [`HugrPQ::push`] when they are already known. /// /// This does not check that the hash is valid. - pub(super) fn push_with_hash_unchecked(&mut self, hugr: Hugr, hash: u64) + pub(super) fn push_unchecked(&mut self, hugr: Hugr, hash: u64, cost: P) where C: Fn(&Hugr) -> P, { - let cost = (self.cost_fn)(&hugr); self.queue.push(hash, cost); self.hash_lookup.insert(hash, hugr); } @@ -86,6 +86,12 @@ impl HugrPQ { } } + /// The largest cost in the queue. + #[allow(unused)] + pub(super) fn max_cost(&self) -> Option<&P> { + self.queue.peek_max().map(|(_, cost)| cost) + } + delegate! 
{ to self.queue { pub(super) fn len(&self) -> usize; diff --git a/src/optimiser/taso/worker.rs b/src/optimiser/taso/worker.rs index 0d8667c8..a2aadbf7 100644 --- a/src/optimiser/taso/worker.rs +++ b/src/optimiser/taso/worker.rs @@ -51,7 +51,7 @@ where let send = tracing::trace_span!(target: "taso::metrics", "TasoWorker::send_result") .in_scope(|| tx_result.send(hashed_circs)); if send.is_err() { - // The main thread closed the send channel, we can stop. + // The priority queue closed the send channel, we can stop. break; } } From 7099098b960c1c1a9e522e7b0ccc6cfbe4b66d90 Mon Sep 17 00:00:00 2001 From: Luca Mondada <72734770+lmondada@users.noreply.github.com> Date: Wed, 4 Oct 2023 13:22:39 +0200 Subject: [PATCH 3/3] fix: Seen circuits may be accepted again (#167) --- src/optimiser/taso.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/optimiser/taso.rs b/src/optimiser/taso.rs index a6045a43..edcaea09 100644 --- a/src/optimiser/taso.rs +++ b/src/optimiser/taso.rs @@ -140,12 +140,12 @@ where let rewrites = self.rewriter.get_rewrites(&circ); for new_circ in self.strategy.apply_rewrites(rewrites, &circ) { let new_circ_hash = new_circ.circuit_hash(); - logger.log_progress(circ_cnt, Some(pq.len()), seen_hashes.len()); if !seen_hashes.insert(new_circ_hash) { // Ignore this circuit: we've already seen it continue; } circ_cnt += 1; + logger.log_progress(circ_cnt, Some(pq.len()), seen_hashes.len()); let new_circ_cost = self.cost(&new_circ); pq.push_unchecked(new_circ, new_circ_hash, new_circ_cost); }