#!/usr/bin/env python3
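"""Batch-process the .rmn test files in tests/data-tests/ with `python -m remarks`.

Each file is converted in a separate subprocess, in parallel across CPU cores,
and the per-file stdout/stderr, error message, and processing time are recorded
in a SQLite database (processing_log.db) for later inspection.
"""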
import atexit
import glob
import os
import sqlite3
import subprocess
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
from datetime import datetime

from tqdm import tqdm

class ProcessingLogger:
    def __init__(self, db_path="processing_log.db"):
        self.db_path = db_path
        self.setup_database()
        atexit.register(self.close)

    def setup_database(self):
        self.conn = sqlite3.connect(self.db_path)
        self.conn.execute("""
            CREATE TABLE IF NOT EXISTS processing_runs (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                start_time TIMESTAMP,
                end_time TIMESTAMP,
                total_files INTEGER,
                successful_files INTEGER,
                failed_files INTEGER,
                total_duration REAL
            )
        """)
        self.conn.execute("""
            CREATE TABLE IF NOT EXISTS file_logs (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                run_id INTEGER,
                file_path TEXT,
                status TEXT,
                stdout TEXT,
                stderr TEXT,
                error_message TEXT,
                processing_time REAL,
                timestamp TIMESTAMP,
                FOREIGN KEY (run_id) REFERENCES processing_runs(id)
            )
        """)
        self.conn.commit()

    def start_run(self, total_files):
        cursor = self.conn.execute(
            "INSERT INTO processing_runs (start_time, total_files) VALUES (?, ?)",
            (datetime.now(), total_files)
        )
        self.current_run_id = cursor.lastrowid
        self.conn.commit()
        return self.current_run_id

    def log_file(self, file_path, status, stdout, stderr, error_message, processing_time):
        self.conn.execute("""
            INSERT INTO file_logs
            (run_id, file_path, status, stdout, stderr, error_message, processing_time, timestamp)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
        """, (self.current_run_id, file_path, status, stdout, stderr, error_message,
              processing_time, datetime.now()))
        self.conn.commit()

    def end_run(self, successful, failed, duration):
        self.conn.execute("""
            UPDATE processing_runs
            SET end_time = ?, successful_files = ?, failed_files = ?,
                total_duration = ?
            WHERE id = ?
        """, (datetime.now(), successful, failed, duration, self.current_run_id))
        self.conn.commit()

    def close(self):
        if hasattr(self, 'conn'):
            self.conn.close()
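
# ProcessingLogger is only driven from main() below, but a standalone usage
# sketch would look like this (hypothetical values, for illustration only):
#
#   logger = ProcessingLogger("example_log.db")
#   logger.start_run(total_files=1)
#   logger.log_file("some/file.rmn", "success", "", "", None, 1.23)
#   logger.end_run(successful=1, failed=0, duration=1.23)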

def process_file(file_path):
    """Process a single file using the remarks command."""
    start_time = time.time()
    try:
        result = subprocess.run(
            ['python', '-m', 'remarks', file_path, "tests/out/data-out"],
            capture_output=True,
            check=True,
            text=True,
            timeout=180
        )
        duration = time.time() - start_time
        return True, file_path, result.stdout, result.stderr, None, duration
    except subprocess.TimeoutExpired as e:
        duration = time.time() - start_time
        return False, file_path, e.stdout, e.stderr, str(e), duration
    except subprocess.CalledProcessError as e:
        duration = time.time() - start_time
        return False, file_path, e.stdout, e.stderr, str(e), duration

def main():
    # Create the output directory (must match the path passed to remarks above)
    os.makedirs("tests/out/data-out", exist_ok=True)

    # Initialize logger
    logger = ProcessingLogger()

    # Find all .rmn files
    files = glob.glob("tests/data-tests/*.rmn")
    total_files = len(files)
    print(f"Found {total_files} files to process")

    # Start run in logger
    logger.start_run(total_files)

    # Track timing
    start_time = time.time()

    # Process files in parallel with progress bar
    successful = 0
    failed = 0

    # Use number of CPU cores for parallel processing
    max_workers = os.cpu_count()

    try:
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            # Submit all tasks
            future_to_file = {executor.submit(process_file, file): file
                              for file in files}

            # Process results as they complete, with a progress bar
            with tqdm(total=total_files, desc="Processing files") as pbar:
                for future in as_completed(future_to_file):
                    success, file_path, stdout, stderr, error, duration = future.result()

                    # Log the result
                    logger.log_file(
                        file_path,
                        "success" if success else "failed",
                        stdout,
                        stderr,
                        error,
                        duration
                    )

                    # Print the error for failed runs; for successful runs,
                    # still surface any stderr output as a warning
                    if not success:
                        tqdm.write(f"\nError processing {file_path}:")
                        tqdm.write(f"Error: {error}")
                    elif stderr:
                        tqdm.write(f"\nWarning in {file_path}:")
                        tqdm.write(stderr)

                    if success:
                        successful += 1
                    else:
                        failed += 1
                    pbar.update(1)
    except KeyboardInterrupt:
        print("Bye! :)")

    # Record final statistics
    end_time = time.time()
    total_duration = end_time - start_time
    logger.end_run(successful, failed, total_duration)

    # Print summary
    print("\nProcessing complete!")
    print(f"Total time: {total_duration:.2f} seconds")
    print(f"Files processed: {successful}")
    print(f"Files failed: {failed}")
    print(f"Results logged to {logger.db_path}")


if __name__ == "__main__":
    main()
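
# A minimal sketch of how the resulting log could be inspected afterwards,
# assuming processing_log.db was produced by the schema above (this snippet is
# illustrative and not part of the script):
#
#   import sqlite3
#   conn = sqlite3.connect("processing_log.db")
#   run_id, total, ok, bad, secs = conn.execute(
#       "SELECT id, total_files, successful_files, failed_files, total_duration "
#       "FROM processing_runs ORDER BY id DESC LIMIT 1"
#   ).fetchone()
#   print(f"run {run_id}: {ok}/{total} succeeded, {bad} failed in {secs:.1f}s")
#   for path, err in conn.execute(
#       "SELECT file_path, error_message FROM file_logs "
#       "WHERE run_id = ? AND status = 'failed'", (run_id,)
#   ):
#       print(f"failed: {path}: {err}")
#   conn.close()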