From 91238283f74c2f1e3708505a1c0dddccc859c81d Mon Sep 17 00:00:00 2001 From: Luka Cehovin Zajc Date: Wed, 14 Jun 2023 11:32:01 +0200 Subject: [PATCH 1/9] Reorganizing report generation, CLI changes --- vot/__init__.py | 19 +++- vot/document/__init__.py | 226 ++++++++++++++++++++++++++++++++------ vot/document/common.py | 33 ++++-- vot/document/html.py | 54 +++++---- vot/document/latex.py | 135 ++++++++++------------- vot/utilities/__init__.py | 4 +- vot/utilities/cli.py | 62 ++++++++++- 7 files changed, 378 insertions(+), 155 deletions(-) diff --git a/vot/__init__.py b/vot/__init__.py index 2db5dfc..406a1a9 100644 --- a/vot/__init__.py +++ b/vot/__init__.py @@ -66,13 +66,24 @@ def check_updates() -> bool: from attributee import Attributee, Integer, Boolean class GlobalConfiguration(Attributee): - """Global configuration object for the toolkit. It is used to store global configuration options. + """Global configuration object for the toolkit. It is used to store global configuration options. It can be initialized + from environment variables. The following options are supported: + + - ``VOT_DEBUG_MODE``: Enables debug mode for the toolkit. + - ``VOT_SEQUENCE_CACHE_SIZE``: Maximum number of sequences to keep in cache. + - ``VOT_RESULTS_BINARY``: Enables binary results format. + - ``VOT_MASK_OPTIMIZE_READ``: Enables mask optimization when reading masks. + - ``VOT_WORKER_POOL_SIZE``: Number of workers to use for parallel processing. + - ``VOT_PERSISTENT_CACHE``: Enables persistent cache for analysis results in workspace. + """ debug_mode = Boolean(default=False, description="Enables debug mode for the toolkit.") - sequence_cache_size = Integer(default=1000, description="Maximum number of sequences to keep in cache.") + sequence_cache_size = Integer(default=100, description="Maximum number of sequences to keep in cache.") results_binary = Boolean(default=True, description="Enables binary results format.") mask_optimize_read = Boolean(default=True, description="Enables mask optimization when reading masks.") + worker_pool_size = Integer(default=1, description="Number of workers to use for parallel processing.") + persistent_cache = Boolean(default=True, description="Enables persistent cache for analysis results in workspace.") def __init__(self): """Initializes the global configuration object. It reads the configuration from environment variables. @@ -90,9 +101,7 @@ def __init__(self): def __repr__(self): """Returns a string representation of the global configuration object.""" - return "debug_mode={} sequence_cache_size={} results_binary={} mask_optimize_read={}".format( - self.debug_mode, self.sequence_cache_size, self.results_binary, self.mask_optimize_read - ) + return " ".join(["{}={}".format(k, getattr(self, k)) for k in self.attributes()]) config = GlobalConfiguration() diff --git a/vot/document/__init__.py b/vot/document/__init__.py index 893976f..599116d 100644 --- a/vot/document/__init__.py +++ b/vot/document/__init__.py @@ -1,7 +1,6 @@ """ This module contains classes for generating reports and visualizations. 
""" import typing -from abc import ABC, abstractmethod import json import inspect import threading @@ -9,7 +8,7 @@ import collections import collections.abc import sys -from asyncio import wait +from asyncio import wait, ensure_future from asyncio.futures import wrap_future import numpy as np @@ -27,9 +26,12 @@ from vot.dataset import Sequence from vot.tracker import Tracker from vot.analysis import Axes +from vot.experiment import Experiment from vot.utilities import class_fullname from vot.utilities.data import Grid +Table = collections.namedtuple("Table", ["header", "data", "order"]) + class Plot(object): """ Base class for all plots. """ @@ -158,7 +160,7 @@ def represent_object(self, o): ResultsYAMLEncoder.add_multi_representer(np.integer, ResultsYAMLEncoder.represent_int) ResultsYAMLEncoder.add_multi_representer(np.inexact, ResultsYAMLEncoder.represent_float) -def generate_serialized(trackers: typing.List[Tracker], sequences: typing.List[Sequence], results, storage: "Storage", serializer: str): +def generate_serialized(trackers: typing.List[Tracker], sequences: typing.List[Sequence], results, storage: "Storage", serializer: str, name: str): """ Generates a serialized report of the results. """ doc = dict() @@ -177,10 +179,10 @@ def generate_serialized(trackers: typing.List[Tracker], sequences: typing.List[S doc["results"][experiment.identifier] = exp if serializer == "json": - with storage.write("results.json") as handle: + with storage.write(name) as handle: json.dump(doc, handle, indent=2, cls=ResultsJSONEncoder) elif serializer == "yaml": - with storage.write("results.yaml") as handle: + with storage.write(name) as handle: yaml.dump(doc, handle, Dumper=ResultsYAMLEncoder) else: raise RuntimeError("Unknown serializer") @@ -378,7 +380,7 @@ def default() -> "StyleManager": manager = getattr(StyleManager._context, 'style_manager', None) if manager is None: - get_logger().info("Creating new style manager") + get_logger().info("Creating new style manager", stack_info=True) manager = StyleManager() StyleManager._context.style_manager = manager @@ -429,13 +431,16 @@ def __call__(self, experiments, trackers, sequences): return indices -class Generator(Attributee): - """ A generator for reports.""" +class Report(Attributee): + """ A report generator for various reports. Base class for all report generators. """ async def generate(self, experiments, trackers, sequences): - raise NotImplementedError + raise NotImplementedError() async def process(self, analyses, experiment, trackers, sequences): + + sequences = experiment.transform(sequences) + if sys.version_info >= (3, 3): _Iterable = collections.abc.Iterable else: @@ -455,42 +460,195 @@ async def process(self, analyses, experiment, trackers, sequences): else: return (future.result() for future in futures) +class SeparableReport(Report): + """ A report generator that is separable across experiments. Base class for all separable report generators. 
""" + + async def perexperiment(self, experiment, trackers, sequences): + raise NotImplementedError() + + def compatible(self, experiment): + raise NotImplementedError() + + async def generate(self, experiments, trackers, sequences): + + futures = [] + texperiments = [] + + for experiment in experiments: + + tsequences = experiment.transform(sequences) + + if self.compatible(experiment): + futures.append(ensure_future(self.perexperiment(experiment, trackers, tsequences))) + texperiments.append(experiment) + else: + continue + + await wait(futures) + + items = dict() + + for experiment, future in zip(texperiments, futures): + items[experiment.identifier] = future.result() + + return items + +class StackAnalysesPlots(SeparableReport): + """ A document that produces plots for all analyses configures in stack experiments. """ + + async def perexperiment(self, experiment, trackers, sequences): + + from vot.document.common import extract_plots + + analyses = [analysis for analysis in experiment.analyses if analysis.compatible(experiment)] + + results = {a: r for a, r in zip(analyses, await self.process(analyses, experiment, trackers, sequences))} + + # Plot in reverse order, with best trackers on top + z_order = list(reversed(range(len(trackers)))) + + return [p for _, p in extract_plots(trackers, {experiment: results}, z_order)[experiment]] + + def compatible(self, experiment): + return True + +class StackAnalysesTable(SeparableReport): + """ A document that produces plots for all analyses configures in stack experiments. """ + + async def perexperiment(self, experiment, trackers, sequences): + + from vot.document.common import extract_measures_table + + analyses = [analysis for analysis in experiment.analyses if analysis.compatible(experiment)] + + results = {a: r for a, r in zip(analyses, await self.process(analyses, experiment, trackers, sequences))} + + table = extract_measures_table(trackers, {experiment: results}) + + return [table] + + def compatible(self, experiment): + return True + class ReportConfiguration(Attributee): """ A configuration for reports.""" style = Nested(StyleManager) sort = Nested(TrackerSorter) - generators = List(Object(subclass=Generator), default=[]) + index = List(Object(subclass=Report), default=[], description="The reports to include.") + +def generate_document(workspace: "Workspace", trackers: typing.List[Tracker], format: str, name: str): + """Generate a report for a one or multiple trackers on an experiment stack and a set of sequences. -# TODO: replace this with report generator and separate json/yaml dump -def generate_document(format: str, config: ReportConfiguration, trackers: typing.List[Tracker], sequences: typing.List[Sequence], results, storage: "Storage"): - """ Generates a report document. - Args: - format: The format of the report. - config: The configuration of the report. + workspace: The workspace to use for the report. trackers: The trackers to include in the report. - sequences: The sequences to include in the report. - results: The results to include in the report. - storage: The storage to use for the report. - + format: The format of the report. + name: The name of the report. 
""" + from asyncio import ensure_future, get_event_loop, wait + + from vot.analysis import AnalysisProcessor + from vot.utilities import Progress + from vot.workspace.storage import Cache + from vot import config + + def merge_tree(src, dest): + + for key, value in src.items(): + if not key in dest: + dest[key] = list() + dest[key] += value - from .html import generate_html_document - from .latex import generate_latex_document + logger = get_logger() - if format == "json": - generate_serialized(trackers, sequences, results, storage, "json") - elif format == "yaml": - generate_serialized(trackers, sequences, results, storage, "yaml") + if config.worker_pool_size == 1: + + if config.debug_mode: + import logging + from vot.analysis.processor import DebugExecutor + logging.getLogger("concurrent.futures").setLevel(logging.DEBUG) + executor = DebugExecutor() + else: + from vot.utilities import ThreadPoolExecutor + executor = ThreadPoolExecutor(1) + + else: + from concurrent.futures import ProcessPoolExecutor + executor = ProcessPoolExecutor(config.workers) + + if not config.persistent_cache: + from cachetools import LRUCache + cache = LRUCache(1000) else: - order = config.sort(results.keys(), trackers, sequences) + cache = Cache(workspace.storage.substorage("cache").substorage("analysis")) + + with workspace.report.style: + + try: - with config.style: - if format == "html": - generate_html_document(trackers, sequences, results, storage) - elif format == "latex": - generate_latex_document(trackers, sequences, results, storage, False, order=order) - elif format == "pdf": - generate_latex_document(trackers, sequences, results, storage, True, order=order) + with AnalysisProcessor(executor, cache) as processor: + + order = workspace.report.sort(workspace.stack, trackers, workspace.dataset) + trackers = [trackers[i] for i in order] + + futures = [] + + for report in workspace.report.index: + futures.append(ensure_future(report.generate(workspace.stack, trackers, workspace.dataset))) + + loop = get_event_loop() + + progress = Progress("Processing", processor.total) + + def update(): + progress.total(processor.total) + progress.absolute(processor.total - processor.pending) + loop.call_later(1, update) + + update() + + if len(futures) > 0: + loop.run_until_complete(wait(futures)) + + progress.close() + + reports = dict() + + for future in futures: + merge_tree(future.result(), reports) + + finally: + + executor.shutdown(wait=True) + + report_storage = workspace.storage.substorage("reports").substorage(name) + + def only_plots(reports, format: str, storage: "Storage"): + """Filter out all non-plot items from the report and save them to storage. + + Args: + reports: The reports to filter. + format: The format to save the plots in. 
+ """ + for key, section in reports.items(): + for item in section: + if isinstance(item, Plot): + logger.debug("Saving plot %s", item.identifier) + plot_name = key + "_" + item.identifier + '.%s' % format.lower() + with storage.write(plot_name, binary=True) as out: + item.save(out, format.upper()) + + if format == "html": + from .html import generate_html_document + generate_html_document(trackers, workspace.dataset, reports, report_storage) + elif format == "latex": + from .latex import generate_latex_document + generate_latex_document(trackers, workspace.dataset, reports, report_storage) + elif format == "pdf_plots": + only_plots(reports, "pdf", report_storage) + elif format == "png_plots": + only_plots(reports, "png", report_storage) + else: + raise ValueError("Unknown report format %s" % format) \ No newline at end of file diff --git a/vot/document/common.py b/vot/document/common.py index 8a645e0..b8fcd4c 100644 --- a/vot/document/common.py +++ b/vot/document/common.py @@ -1,8 +1,10 @@ """Common functions for document generation.""" import os import math +from typing import List -from vot.document import ScatterPlot, LinePlot +from vot.tracker import Tracker +from vot.document import ScatterPlot, LinePlot, Table from vot.analysis import Measure, Point, Plot, Curve, Sorting, Axes def read_resource(name): @@ -15,13 +17,13 @@ def per_tracker(a): """Returns true if the analysis is per-tracker.""" return a.axes == Axes.TRACKERS -def extract_measures_table(trackers, results): +def extract_measures_table(trackers: List[Tracker], results) -> Table: """Extracts a table of measures from the results. The table is a list of lists, where each list is a column. The first column is the tracker name, the second column is the measure name, and the rest of the columns are the values for each tracker. Args: trackers (list): List of trackers. - results (dict): Dictionary of results. + results (dict): Dictionary of results. It is a dictionary of dictionaries, where the first key is the experiment, and the second key is the analysis. The value is a list of results for each tracker. """ table_header = [[], [], []] table_data = dict() @@ -77,10 +79,18 @@ def extract_measures_table(trackers, results): order[v[1]] = j table_order.append(order) - return table_header, table_data, table_order + return Table(table_header, table_data, table_order) -def extract_plots(trackers, results, order=None): - """Extracts a list of plots from the results. The list is a list of tuples, where each tuple is a pair of strings and a plot.""" +def extract_plots(trackers: List[Tracker], results, order=None): + """Extracts a list of plots from the results. The list is a list of tuples, where each tuple is a pair of strings and a plot. + + Args: + trackers (list): List of trackers. + results (dict): Dictionary of results. It is a dictionary of dictionaries, where the first key is the experiment, and the second key is the analysis. The value is a list of results for each tracker. + + Returns: + list: List of plots. + """ plots = dict() j = 0 @@ -131,7 +141,16 @@ def extract_plots(trackers, results, order=None): return plots def format_value(data): - """Formats a value for display.""" + """Formats a value for display. If the value is a string, it is returned as is. If the value is an integer, it is returned as a string. + If the value is a float, it is returned as a string with 3 decimal places. Otherwise, the value is converted to a string. + + Args: + data: Value to format. + + Returns: + str: Formatted value. 
+ + """ if data is None: return "N/A" if isinstance(data, str): diff --git a/vot/document/html.py b/vot/document/html.py index 21ef299..9d0a770 100644 --- a/vot/document/html.py +++ b/vot/document/html.py @@ -14,7 +14,7 @@ from vot.dataset import Sequence from vot.workspace import Storage from vot.document.common import format_value, read_resource, merge_repeats, extract_measures_table, extract_plots -from vot.document import StyleManager +from vot.document import StyleManager, Table, Plot from vot.utilities.data import Grid ORDER_CLASSES = {1: "first", 2: "second", 3: "third"} @@ -59,13 +59,13 @@ def grid_table(data: Grid, rows: List[str], columns: List[str]): return element -def generate_html_document(trackers: List[Tracker], sequences: List[Sequence], results, storage: Storage): +def generate_html_document(trackers: List[Tracker], sequences: List[Sequence], reports, storage: Storage): """Generates an HTML document from the results of the experiments. Args: trackers (list): List of trackers. sequences (list): List of sequences. - results (dict): Dictionary of results. + reports (dict): List of reports as tuples of (name, data). storage (Storage): Storage object. """ @@ -98,9 +98,6 @@ def add_script(name, linked=False): logger = logging.getLogger("vot") - table_header, table_data, table_order = extract_measures_table(trackers, results) - plots = extract_plots(trackers, results) - legend = StyleManager.default().legend(Tracker) doc = dominate.document(title='VOT report') @@ -114,50 +111,51 @@ def add_script(name, linked=False): add_script("table.js", linked) add_script("report.js", linked) - with doc: - - h1("VOT report") - - with ol(cls="metadata"): - li('Toolkit version: ' + toolkit_version()) - li('Created: ' + datetime.datetime.now().isoformat()) - - if len(table_header[2]) == 0: + # TODO: make table more general (now it assumes a tracker per row) + def make_table(data: Table): + if len(data.header[2]) == 0: logger.debug("No measures found, skipping table") else: with table(cls="overview-table pure-table pure-table-horizontal pure-table-striped"): with thead(): with tr(): th() - [th(c[0].identifier, colspan=c[1]) for c in merge_repeats(table_header[0])] + [th(c[0].identifier, colspan=c[1]) for c in merge_repeats(data.header[0])] with tr(): th() - [th(c[0].title, colspan=c[1]) for c in merge_repeats(table_header[1])] + [th(c[0].title, colspan=c[1]) for c in merge_repeats(data.header[1])] with tr(): th("Trackers") - [th(c.abbreviation, data_sort="int" if order else "") for c, order in zip(table_header[2], table_order)] + [th(c.abbreviation, data_sort="int" if order else "") for c, order in zip(data.header[2], data.order)] with tbody(): - for tracker, data in table_data.items(): + for tracker, row in data.data.items(): with tr(data_tracker=tracker.reference): with td(): insert_mplfigure(legend.figure(tracker)) span(tracker.label) - for value, order in zip(data, table_order): + for value, order in zip(row, data.order): insert_cell(value, order[tracker] if not order is None else None) - for experiment, experiment_plots in plots.items(): - if len(experiment_plots) == 0: - continue + with doc: - h2("Experiment {}".format(experiment.identifier), cls="experiment") + h1("VOT toolkit report document") - with div(cls="plots"): + with ol(cls="metadata"): + li('Toolkit version: ' + toolkit_version()) + li('Created: ' + datetime.datetime.now().isoformat()) - for title, plot in experiment_plots: + for key, section in reports.items(): + + h2(key, cls="section") + + for item in section: + if 
isinstance(item, Table): + make_table(item) + if isinstance(item, Plot): + plot = item with div(cls="plot"): - p(title) + p(key) insert_figure(plot) - with storage.write("report.html") as filehandle: filehandle.write(doc.render()) diff --git a/vot/document/latex.py b/vot/document/latex.py index e768031..0aca8f0 100644 --- a/vot/document/latex.py +++ b/vot/document/latex.py @@ -13,8 +13,8 @@ from vot.tracker import Tracker from vot.dataset import Sequence from vot.workspace import Storage -from vot.document.common import format_value, read_resource, merge_repeats, extract_measures_table, extract_plots -from vot.document import StyleManager +from vot.document.common import format_value, read_resource, merge_repeats +from vot.document import StyleManager, Plot, Table TRACKER_GROUP = "default" @@ -54,19 +54,16 @@ def generate_symbols(container, trackers): container.append(Command("makeatother")) - -def generate_latex_document(trackers: List[Tracker], sequences: List[Sequence], results, storage: Storage, build=False, multipart=True, order=None) -> str: +def generate_latex_document(trackers: List[Tracker], sequences: List[Sequence], reports, storage: Storage, multipart=True) -> str: """Generates a LaTeX document with the results. The document is returned as a string. If build is True, the document is compiled and the PDF is returned. Args: trackers (list): List of trackers. sequences (list): List of sequences. - results (dict): Dictionary of results. + reports (list): List of results tuples. storage (Storage): Storage object. - build (bool): If True, the document is compiled and the PDF is returned. multipart (bool): If True, the document is split into multiple files. - order (list): List of tracker indices to use for ordering. """ order_marks = {1: "first", 2: "second", 3: "third"} @@ -80,13 +77,6 @@ def format_cell(value, order): logger = get_logger() - table_header, table_data, table_order = extract_measures_table(trackers, results) - - if order is not None: - ordered_trackers = [trackers[i] for i in order] - else: - ordered_trackers = trackers - doc = Document(page_numbers=True) doc.preamble.append(Package('pgf')) @@ -99,74 +89,71 @@ def format_cell(value, order): doc.preamble.append(UnsafeCommand('newcommand', r'\second', options=1, extra_arguments=r'{\color{green} #1 }')) doc.preamble.append(UnsafeCommand('newcommand', r'\third', options=1, extra_arguments=r'{\color{blue} #1 }')) + # TODO: make table more general (now it assumes a tracker per row) + def make_table(doc, table): + + if len(table.header[2]) == 0: + logger.debug("No measures found, skipping table") + else: + + # Generate data table + with doc.create(LongTable("l " * (len(table.header[2]) + 1))) as data_table: + data_table.add_hline() + data_table.add_row([" "] + [MultiColumn(c[1], data=c[0].identifier) for c in merge_repeats(table.header[0])]) + data_table.add_hline() + data_table.add_row([" "] + [MultiColumn(c[1], data=c[0].title) for c in merge_repeats(table.header[1])]) + data_table.add_hline() + data_table.add_row(["Tracker"] + [" " + c.abbreviation + " " for c in table.header[2]]) + data_table.add_hline() + data_table.end_table_header() + data_table.add_hline() + + for tracker, data in table.data.items(): + data_table.add_row([UnsafeCommand("Tracker", [tracker.reference, TRACKER_GROUP])] + + [format_cell(x, order[tracker] if not order is None else None) for x, order in zip(data, table.order)]) + if multipart: container = Chunk() - generate_symbols(container, ordered_trackers) + generate_symbols(container, trackers) with 
storage.write("symbols.tex") as out: container.dump(out) doc.preamble.append(Command("input", "symbols.tex")) else: - generate_symbols(doc.preamble, ordered_trackers) + generate_symbols(doc.preamble, trackers) - doc.preamble.append(Command('title', 'VOT report')) + doc.preamble.append(Command('title', 'VOT toolkit report')) doc.preamble.append(Command('author', 'Toolkit version ' + toolkit_version())) doc.preamble.append(Command('date', datetime.datetime.now().isoformat())) doc.append(NoEscape(r'\maketitle')) - - if len(table_header[2]) == 0: - logger.debug("No measures found, skipping table") - else: - - # Generate data table - with doc.create(LongTable("l " * (len(table_header[2]) + 1))) as data_table: - data_table.add_hline() - data_table.add_row([" "] + [MultiColumn(c[1], data=c[0].identifier) for c in merge_repeats(table_header[0])]) - data_table.add_hline() - data_table.add_row([" "] + [MultiColumn(c[1], data=c[0].title) for c in merge_repeats(table_header[1])]) - data_table.add_hline() - data_table.add_row(["Tracker"] + [" " + c.abbreviation + " " for c in table_header[2]]) - data_table.add_hline() - data_table.end_table_header() - data_table.add_hline() - - for tracker in ordered_trackers: - data = table_data[tracker] - data_table.add_row([UnsafeCommand("Tracker", [tracker.reference, TRACKER_GROUP])] + - [format_cell(x, order[tracker] if not order is None else None) for x, order in zip(data, table_order)]) - - if order is not None: - z_order = [0] * len(order) - for i, j in enumerate(order): - z_order[max(order) - i] = j - else: - z_order = list(range(len(trackers))) - - plots = extract_plots(trackers, results, z_order) - - for experiment, experiment_plots in plots.items(): - if len(experiment_plots) == 0: - continue - - doc.append(Section("Experiment " + experiment.identifier)) - - for title, plot in experiment_plots: - - with doc.create(Figure(position='htbp')) as container: - if multipart: - plot_name = plot.identifier + ".pdf" - with storage.write(plot_name, binary=True) as out: - plot.save(out, "PDF") - container.add_image(plot_name) - else: - container.append(insert_figure(plot)) - container.add_caption(title) - - if build: - temp = tempfile.mktemp() - logger.debug("Generating to temporary output %s", temp) - doc.generate_pdf(temp, clean_tex=True) - storage.copy(temp + ".pdf", "report.pdf") - else: - with storage.write("report.tex") as out: - doc.dump(out) + for key, section in reports.items(): + + doc.append(Section(key)) + + for item in section: + if isinstance(item, Table): + make_table(doc, item) + if isinstance(item, Plot): + plot = item + with doc.create(Figure(position='htbp')) as container: + if multipart: + plot_name = plot.identifier + ".pdf" + with storage.write(plot_name, binary=True) as out: + plot.save(out, "PDF") + container.add_image(plot_name) + else: + container.append(insert_figure(plot)) + container.add_caption(plot.identifier) + + logger.debug("Saving plot %s", item.identifier) + item.save(key + "_" + item.identifier + '.pdf', "PDF") + + # TODO: Move to separate function + #if build: + # temp = tempfile.mktemp() + # logger.debug("Generating to temporary output %s", temp) + # doc.generate_pdf(temp, clean_tex=True) + # storage.copy(temp + ".pdf", "report.pdf") + #else: + with storage.write("report.tex") as out: + doc.dump(out) diff --git a/vot/utilities/__init__.py b/vot/utilities/__init__.py index db90717..c87dba5 100644 --- a/vot/utilities/__init__.py +++ b/vot/utilities/__init__.py @@ -157,8 +157,8 @@ def __init__(self, description="Processing", 
total=100): silent = get_logger().level > logging.INFO if not silent: - self._tqdm = tqdm(disable=False if is_notebook() else None, - bar_format=" {desc:20.20} |{bar}| {percentage:3.0f}% [{elapsed}<{remaining}]") + self._tqdm = tqdm(disable=False if is_notebook() else None, + bar_format=" {desc:20.20} |{bar}| {percentage:3.0f}% [{elapsed}<{remaining}]", file=sys.stdout, leave=False) self._tqdm.desc = description self._tqdm.total = total if silent or self._tqdm.disable: diff --git a/vot/utilities/cli.py b/vot/utilities/cli.py index f9b70f5..96986e6 100644 --- a/vot/utilities/cli.py +++ b/vot/utilities/cli.py @@ -235,14 +235,15 @@ def do_evaluate(config: argparse.Namespace): logger.error("Evaluation interrupted by tracker error: {}".format(te)) def do_analysis(config: argparse.Namespace): - """Run an analysis for a tracker on an experiment stack and a set of sequences. + """Run an analysis for a tracker on an experiment stack and a set of sequences. Analysis results are serialized + to disk either as a JSON file or as a YAML file. Args: config (argparse.Namespace): Configuration """ from vot.analysis import AnalysisProcessor, process_stack_analyses - from vot.document import generate_document + from vot.document import generate_serialized workspace = Workspace.load(config.workspace) @@ -299,9 +300,14 @@ def do_analysis(config: argparse.Namespace): else: name = config.name - storage = workspace.storage.substorage("analysis").substorage(name) + storage = workspace.storage.substorage("analysis") - generate_document(config.format, workspace.report, trackers, workspace.dataset, results, storage) + if config.format == "json": + generate_serialized(trackers, workspace.dataset, results, storage, "json", name) + elif config.format == "yaml": + generate_serialized(trackers, workspace.dataset, results, storage, "yaml", name) + else: + raise ValueError("Unknown format '{}'".format(config.format)) logger.info("Analysis successful, report available as %s", name) @@ -309,6 +315,43 @@ def do_analysis(config: argparse.Namespace): executor.shutdown(wait=True) +def do_report(config: argparse.Namespace): + """Generate a report for a one or multiple trackers on an experiment stack and a set of sequences. + + Args: + config (argparse.Namespace): Configuration + """ + + from vot.document import generate_document + + if config.name is None: + name = "{:%Y-%m-%dT%H-%M-%S.%f%z}".format(datetime.now()) + else: + name = config.name + + workspace = Workspace.load(config.workspace) + + logger.debug("Loaded workspace in '%s'", config.workspace) + + global_registry = [os.path.abspath(x) for x in config.registry] + + registry = Registry(list(workspace.registry) + global_registry, root=config.workspace) + + logger.debug("Found data for %d trackers", len(registry)) + + if not config.trackers: + trackers = workspace.list_results(registry) + else: + trackers = registry.resolve(*config.trackers, storage=workspace.storage.substorage("results"), skip_unknown=False) + + if not trackers: + logger.warning("No trackers resolved, stopping.") + return + + logger.debug("Running analysis for %d trackers", len(trackers)) + + generate_document(workspace, trackers, config.format, name) + def do_pack(config: argparse.Namespace): """Package results to a ZIP file so that they can be submitted to a challenge. 
@@ -410,11 +453,17 @@ def main(): analysis_parser = subparsers.add_parser('analysis', help='Run analysis of results') analysis_parser.add_argument("trackers", nargs='*', help='Tracker identifiers') analysis_parser.add_argument("--workspace", default=os.getcwd(), help='Workspace path') - analysis_parser.add_argument("--format", choices=("html", "latex", "pdf", "json", "yaml"), default="html", help='Analysis output format') + analysis_parser.add_argument("--format", choices=("json", "yaml"), default="json", help='Analysis output format') analysis_parser.add_argument("--name", required=False, help='Analysis output name') analysis_parser.add_argument("--workers", default=1, required=False, help='Number of parallel workers', type=int) analysis_parser.add_argument("--nocache", default=False, required=False, help="Do not cache data to disk", action='store_true') + report_parser = subparsers.add_parser('report', help='Generate report document') + report_parser.add_argument("trackers", nargs='*', help='Tracker identifiers') + report_parser.add_argument("--workspace", default=os.getcwd(), help='Workspace path') + report_parser.add_argument("--format", choices=("html", "latex", "pdf_plots", "png_plots"), default="html", help='Analysis output format') + report_parser.add_argument("--name", required=False, help='Document output name') + pack_parser = subparsers.add_parser('pack', help='Package results for submission') pack_parser.add_argument("--workspace", default=os.getcwd(), help='Workspace path') pack_parser.add_argument("tracker", help='Tracker identifier') @@ -446,6 +495,9 @@ def check_version(): elif args.action == "analysis": check_version() do_analysis(args) + elif args.action == "report": + check_version() + do_report(args) elif args.action == "pack": check_version() do_pack(args) From 21e0c1ee47def7324fd99a672dcc773f0e34e5fd Mon Sep 17 00:00:00 2001 From: Luka Cehovin Zajc Date: Wed, 14 Jun 2023 11:32:15 +0200 Subject: [PATCH 2/9] Version bump --- vot/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vot/version.py b/vot/version.py index 6dbe830..5369df1 100644 --- a/vot/version.py +++ b/vot/version.py @@ -1,4 +1,4 @@ """ Toolkit version """ -__version__ = '0.6.4' \ No newline at end of file +__version__ = '0.6.5' \ No newline at end of file From f651d19e7d9f3b804ba55a28d6b7e629388012c1 Mon Sep 17 00:00:00 2001 From: Luka Cehovin Zajc Date: Tue, 27 Jun 2023 21:14:19 +0200 Subject: [PATCH 3/9] Fixing ordering of some measures --- vot/analysis/accuracy.py | 2 ++ vot/analysis/longterm.py | 8 ++++---- vot/document/common.py | 11 ++++++++--- vot/utilities/cli.py | 37 ++++++++++++++++++------------------- 4 files changed, 32 insertions(+), 26 deletions(-) diff --git a/vot/analysis/accuracy.py b/vot/analysis/accuracy.py index 27b9e45..77672dd 100644 --- a/vot/analysis/accuracy.py +++ b/vot/analysis/accuracy.py @@ -34,6 +34,8 @@ def gather_overlaps(trajectory: List[Region], groundtruth: List[Region], burnin: Returns: np.ndarray: List of overlaps.""" + assert len(trajectory) == len(groundtruth), "Trajectory and groundtruth must have the same length." 
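+    # calculate_overlaps pairs the two lists frame-by-frame, so they must be of equal length.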
+ overlaps = np.array(calculate_overlaps(trajectory, groundtruth, bounds)) mask = np.ones(len(overlaps), dtype=bool) diff --git a/vot/analysis/longterm.py b/vot/analysis/longterm.py index a64bc40..2091e1f 100644 --- a/vot/analysis/longterm.py +++ b/vot/analysis/longterm.py @@ -481,8 +481,8 @@ def _title_default(self): def describe(self): """Describes the analysis.""" - return Measure("Non-reported Error", "NRE", 0, 1, Sorting.DESCENDING), \ - Measure("Drift-rate Error", "DRE", 0, 1, Sorting.DESCENDING), \ + return Measure("Non-reported Error", "NRE", 0, 1, Sorting.ASCENDING), \ + Measure("Drift-rate Error", "DRE", 0, 1, Sorting.ASCENDING), \ Measure("Absence-detection Quality", "ADQ", 0, 1, Sorting.DESCENDING), def subcompute(self, experiment: Experiment, tracker: Tracker, sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]: @@ -562,8 +562,8 @@ def dependencies(self): def describe(self): """Describes the analysis.""" - return Measure("Non-reported Error", "NRE", 0, 1, Sorting.DESCENDING), \ - Measure("Drift-rate Error", "DRE", 0, 1, Sorting.DESCENDING), \ + return Measure("Non-reported Error", "NRE", 0, 1, Sorting.ASCENDING), \ + Measure("Drift-rate Error", "DRE", 0, 1, Sorting.ASCENDING), \ Measure("Absence-detection Quality", "ADQ", 0, 1, Sorting.DESCENDING), def compatible(self, experiment: Experiment): diff --git a/vot/document/common.py b/vot/document/common.py index b8fcd4c..5929255 100644 --- a/vot/document/common.py +++ b/vot/document/common.py @@ -29,6 +29,9 @@ def extract_measures_table(trackers: List[Tracker], results) -> Table: table_data = dict() column_order = [] + def safe(value, default): + return value if not value is None else default + for experiment, eresults in results.items(): for analysis, aresults in eresults.items(): descriptions = analysis.describe() @@ -63,22 +66,24 @@ def extract_measures_table(trackers: List[Tracker], results) -> Table: for i, order in enumerate(column_order): values = [(v[i], k) for k, v in table_data.items()] if order == Sorting.ASCENDING: - values = sorted(values, key=lambda x: x[0] or -math.inf, reverse=False) + values = sorted(values, key=lambda x: safe(x[0], -math.inf), reverse=False) elif order == Sorting.DESCENDING: - values = sorted(values, key=lambda x: x[0] or math.inf, reverse=True) + values = sorted(values, key=lambda x: safe(x[0], math.inf), reverse=True) else: table_order.append(None) continue + order = dict() j = 0 value = None + # Take into account that some values are the same for k, v in enumerate(values): j = j if value == v[0] else k + 1 value = v[0] order[v[1]] = j table_order.append(order) - + return Table(table_header, table_data, table_order) def extract_plots(trackers: List[Tracker], results, order=None): diff --git a/vot/utilities/cli.py b/vot/utilities/cli.py index 96986e6..99fb18b 100644 --- a/vot/utilities/cli.py +++ b/vot/utilities/cli.py @@ -234,31 +234,32 @@ def do_evaluate(config: argparse.Namespace): except TrackerException as te: logger.error("Evaluation interrupted by tracker error: {}".format(te)) -def do_analysis(config: argparse.Namespace): +def do_analysis(args: argparse.Namespace): """Run an analysis for a tracker on an experiment stack and a set of sequences. Analysis results are serialized to disk either as a JSON file or as a YAML file. 
Args: - config (argparse.Namespace): Configuration + args (argparse.Namespace): Configuration """ + from vot import config from vot.analysis import AnalysisProcessor, process_stack_analyses from vot.document import generate_serialized - workspace = Workspace.load(config.workspace) + workspace = Workspace.load(args.workspace) - logger.debug("Loaded workspace in '%s'", config.workspace) + logger.debug("Loaded workspace in '%s'", args.workspace) - global_registry = [os.path.abspath(x) for x in config.registry] + global_registry = [os.path.abspath(x) for x in args.registry] - registry = Registry(list(workspace.registry) + global_registry, root=config.workspace) + registry = Registry(list(workspace.registry) + global_registry, root=args.workspace) logger.debug("Found data for %d trackers", len(registry)) - if not config.trackers: + if not args.trackers: trackers = workspace.list_results(registry) else: - trackers = registry.resolve(*config.trackers, storage=workspace.storage.substorage("results"), skip_unknown=False) + trackers = registry.resolve(*args.trackers, storage=workspace.storage.substorage("results"), skip_unknown=False) if not trackers: logger.warning("No trackers resolved, stopping.") @@ -266,9 +267,9 @@ def do_analysis(config: argparse.Namespace): logger.debug("Running analysis for %d trackers", len(trackers)) - if config.workers == 1: + if config.worker_pool_size == 1: - if config.debug: + if args.debug: from vot.analysis.processor import DebugExecutor logging.getLogger("concurrent.futures").setLevel(logging.DEBUG) executor = DebugExecutor() @@ -278,9 +279,9 @@ def do_analysis(config: argparse.Namespace): else: from concurrent.futures import ProcessPoolExecutor - executor = ProcessPoolExecutor(config.workers) + executor = ProcessPoolExecutor(config.worker_pool_size) - if config.nocache: + if not config.persistent_cache: from cachetools import LRUCache cache = LRUCache(1000) else: @@ -295,19 +296,19 @@ def do_analysis(config: argparse.Namespace): if results is None: return - if config.name is None: + if args.name is None: name = "{:%Y-%m-%dT%H-%M-%S.%f%z}".format(datetime.now()) else: - name = config.name + name = args.name storage = workspace.storage.substorage("analysis") - if config.format == "json": + if args.format == "json": generate_serialized(trackers, workspace.dataset, results, storage, "json", name) - elif config.format == "yaml": + elif args.format == "yaml": generate_serialized(trackers, workspace.dataset, results, storage, "yaml", name) else: - raise ValueError("Unknown format '{}'".format(config.format)) + raise ValueError("Unknown format '{}'".format(args.format)) logger.info("Analysis successful, report available as %s", name) @@ -455,8 +456,6 @@ def main(): analysis_parser.add_argument("--workspace", default=os.getcwd(), help='Workspace path') analysis_parser.add_argument("--format", choices=("json", "yaml"), default="json", help='Analysis output format') analysis_parser.add_argument("--name", required=False, help='Analysis output name') - analysis_parser.add_argument("--workers", default=1, required=False, help='Number of parallel workers', type=int) - analysis_parser.add_argument("--nocache", default=False, required=False, help="Do not cache data to disk", action='store_true') report_parser = subparsers.add_parser('report', help='Generate report document') report_parser.add_argument("trackers", nargs='*', help='Tracker identifiers') From 29941ad3938f14ae4b7ff0a93dde6fa347233a88 Mon Sep 17 00:00:00 2001 From: Luka Cehovin Zajc Date: Fri, 1 Dec 2023 09:11:17 +0100 
Subject: [PATCH 4/9] Renaming document to report --- docs/api/document.rst | 14 +-- vot/{document => report}/__init__.py | 146 ++++++++++++++++++++++++-- vot/{document => report}/commands.tex | 0 vot/{document => report}/common.py | 2 +- vot/{document => report}/html.py | 26 +++-- vot/{document => report}/jquery.js | 0 vot/{document => report}/latex.py | 6 +- vot/{document => report}/pure.css | 0 vot/{document => report}/report.css | 0 vot/{document => report}/report.js | 0 vot/{document => report}/table.js | 0 vot/report/tests.py | 0 vot/report/videos.py | 46 ++++++++ vot/utilities/cli.py | 4 +- vot/workspace/__init__.py | 2 +- 15 files changed, 217 insertions(+), 29 deletions(-) rename vot/{document => report}/__init__.py (83%) rename vot/{document => report}/commands.tex (100%) rename vot/{document => report}/common.py (99%) rename vot/{document => report}/html.py (87%) rename vot/{document => report}/jquery.js (100%) rename vot/{document => report}/latex.py (96%) rename vot/{document => report}/pure.css (100%) rename vot/{document => report}/report.css (100%) rename vot/{document => report}/report.js (100%) rename vot/{document => report}/table.js (100%) create mode 100644 vot/report/tests.py create mode 100644 vot/report/videos.py diff --git a/docs/api/document.rst b/docs/api/document.rst index 09b1b2d..2777d8b 100644 --- a/docs/api/document.rst +++ b/docs/api/document.rst @@ -1,21 +1,21 @@ -Document module +Report module ============ -.. automodule:: vot.document +.. automodule:: vot.report :members: -.. automodule:: vot.document.common +.. automodule:: vot.report.common :members: -HTML document generation +HTML report generation ------------------------ -.. automodule:: vot.document +.. automodule:: vot.report :members: -LaTeX document generation +LaTeX report generation ------------------------- -.. automodule:: vot.document.latex +.. automodule:: vot.report.latex :members: diff --git a/vot/document/__init__.py b/vot/report/__init__.py similarity index 83% rename from vot/document/__init__.py rename to vot/report/__init__.py index 599116d..1490e79 100644 --- a/vot/document/__init__.py +++ b/vot/report/__init__.py @@ -23,10 +23,10 @@ from vot import __version__ as version from vot import get_logger -from vot.dataset import Sequence +from vot.dataset import Sequence, FrameList from vot.tracker import Tracker from vot.analysis import Axes -from vot.experiment import Experiment +#from vot.workspace import Storage, Workspace from vot.utilities import class_fullname from vot.utilities.data import Grid @@ -77,8 +77,13 @@ def axes(self) -> Axes: """ Returns the axes of the plot.""" return self._axes - def save(self, output, fmt): - """ Saves the plot to a file.""" + def save(self, output: str, fmt: str): + """ Saves the plot to a file. + + Args: + output (str): The output file. + fmt (str): The format of the output file. + """ self._figure.savefig(output, format=fmt, bbox_inches='tight', transparent=True) @property @@ -86,6 +91,86 @@ def identifier(self): """ Returns the identifier of the plot.""" return self._identifier +class Video(object): + """ Base class for all videos. """ + + def __init__(self, identifier: str, frames: FrameList, fps: int = 30, trait = None): + """ Initializes the video object. + + Args: + identifier (str): The identifier of the video. + frames (FrameList): The frames of the video. + fps (int): The frames per second of the video. + trait (str): The trait of the video. 
+ """ + + self._identifier = identifier + self._frames = frames + self._fps = fps + self._manager = StyleManager.default() + + def __call__(self, frame: int, key, data): + """ Draws the data on the frame.""" + self.draw(frame, key, data) + + def draw(self, frame: int, key, data): + """ Draws the data on the plot.""" + raise NotImplementedError + + def render(self, frame: int): + """ Renders the frame and returns it as a NumPy array.""" + raise NotImplementedError + + def save(self, output: str, fmt: str): + import tempfile + import shutil + import os + import cv2 + + supported_mappings = { + "mp4": "mp4v", + "avi": "XVID" + } + + if not fmt in supported_mappings: + raise ValueError("Unsupported video format: {}".format(fmt)) + + fourcc = cv2.VideoWriter_fourcc(*supported_mappings[fmt]) + + frame = self.render(0) + height, width, _ = frame.shape + + if not isinstance(output, str): + fd, tempname = tempfile.mkstemp() + os.close(fd) + else: + tempname = output + + writer = cv2.VideoWriter(tempname, fourcc, self._fps, (height, width)) + print(frame.dtype, frame.shape) + writer.write(frame) + + for i in range(1, len(self._frames)): + frame = self.render(i) + + writer.write(frame) + + writer.release() + + if tempname == output: + return + + shutil.copyfileobj(open(tempname, 'rb'), output) + #os.remove(tempname) + + def __len__(self): + return len(self._frames) + + @property + def identifier(self): + """ Returns the identifier of the plot.""" + return self._identifier + class ScatterPlot(Plot): """ A scatter plot.""" @@ -95,8 +180,7 @@ def draw(self, key, data): return style = self._manager.plot_style(key) - handle = self._axes.scatter(data[0], data[1], **style.point_style()) - #handle.set_gid("report_%s_%d" % (self._identifier, style["number"])) + self._axes.scatter(data[0], data[1], **style.point_style()) class LinePlot(Plot): """ A line plot.""" @@ -117,8 +201,41 @@ def draw(self, key, data): style = self._manager.plot_style(key) - handle = self._axes.plot(x, y, **style.line_style()) - # handle[0].set_gid("report_%s_%d" % (self._identifier, style["number"])) + self._axes.plot(x, y, **style.line_style()) + +class ObjectVideo(Video): + + def __init__(self, identifier: str, frames: FrameList, trait=None): + super().__init__(identifier, frames, trait) + self._regions = {} + + def draw(self, frame, key, data): + from vot.region import Region + assert isinstance(data, Region) + + if not key in self._regions: + self._regions[key] = [None] * len(self) + + self._regions[key][frame] = data + + def render(self, frame: int): + from vot.utilities.draw import ImageDrawHandle + + assert frame >= 0 and frame < len(self) + + handle = ImageDrawHandle(self._frames.frame(frame).image()) + + for key, regions in self._regions.items(): + if regions[frame] is None: + continue + + style = self._manager.plot_style(key) + + handle.style(**style.region_style()) + regions[frame].draw(handle) + + return handle.array + class ResultsJSONEncoder(json.JSONEncoder): """ JSON encoder for results. 
""" @@ -220,6 +337,10 @@ def point_style(self): """ Returns the style for a point.""" raise NotImplementedError + def region_style(self): + """ Returns the style for a region, used with DrawHandle.""" + raise NotImplementedError + class DefaultStyle(PlotStyle): """ The default style for a plot.""" @@ -258,6 +379,11 @@ def point_style(self): marker = DefaultStyle.markers[self._number % len(DefaultStyle.markers)] return dict(marker=marker, c=[color]) + def region_style(self): + """ Returns the style for a region, used with DrawHandle.""" + color = DefaultStyle.colormap((self._number % DefaultStyle.colorcount + 1) / DefaultStyle.colorcount) + return dict(color=color, fill=True) + class Legend(object): """ A legend for a plot.""" @@ -498,7 +624,7 @@ class StackAnalysesPlots(SeparableReport): async def perexperiment(self, experiment, trackers, sequences): - from vot.document.common import extract_plots + from vot.report.common import extract_plots analyses = [analysis for analysis in experiment.analyses if analysis.compatible(experiment)] @@ -517,7 +643,7 @@ class StackAnalysesTable(SeparableReport): async def perexperiment(self, experiment, trackers, sequences): - from vot.document.common import extract_measures_table + from vot.report.common import extract_measures_table analyses = [analysis for analysis in experiment.analyses if analysis.compatible(experiment)] diff --git a/vot/document/commands.tex b/vot/report/commands.tex similarity index 100% rename from vot/document/commands.tex rename to vot/report/commands.tex diff --git a/vot/document/common.py b/vot/report/common.py similarity index 99% rename from vot/document/common.py rename to vot/report/common.py index 5929255..990fdfa 100644 --- a/vot/document/common.py +++ b/vot/report/common.py @@ -4,7 +4,7 @@ from typing import List from vot.tracker import Tracker -from vot.document import ScatterPlot, LinePlot, Table +from vot.report import ScatterPlot, LinePlot, Table from vot.analysis import Measure, Point, Plot, Curve, Sorting, Axes def read_resource(name): diff --git a/vot/document/html.py b/vot/report/html.py similarity index 87% rename from vot/document/html.py rename to vot/report/html.py index 9d0a770..e0029a4 100644 --- a/vot/document/html.py +++ b/vot/report/html.py @@ -6,15 +6,15 @@ from typing import List import dominate -from dominate.tags import h1, h2, table, thead, tbody, tr, th, td, div, p, li, ol, span, style, link, script +from dominate.tags import h1, h2, table, thead, tbody, tr, th, td, div, p, li, ol, span, style, link, script, video from dominate.util import raw from vot import toolkit_version, check_debug from vot.tracker import Tracker from vot.dataset import Sequence from vot.workspace import Storage -from vot.document.common import format_value, read_resource, merge_repeats, extract_measures_table, extract_plots -from vot.document import StyleManager, Table, Plot +from vot.report.common import format_value, read_resource, merge_repeats +from vot.report import StyleManager, Table, Plot, Video from vot.utilities.data import Grid ORDER_CLASSES = {1: "first", 2: "second", 3: "third"} @@ -69,6 +69,15 @@ def generate_html_document(trackers: List[Tracker], sequences: List[Sequence], r storage (Storage): Storage object. 
""" + def insert_video(data: Video): + """Insert a video into the document.""" + name = data.identifier + ".avi" + + data.save(storage.write(name), "avi") + + with video(src=name, controls=True, preload="auto", autoplay=False, loop=False, width="100%", height="100%"): + raw("Your browser does not support the video tag.") + def insert_figure(figure): """Inserts a matplotlib figure into the document.""" buffer = io.StringIO() @@ -152,10 +161,15 @@ def make_table(data: Table): if isinstance(item, Table): make_table(item) if isinstance(item, Plot): - plot = item with div(cls="plot"): p(key) - insert_figure(plot) - + insert_figure(item) + if isinstance(item, Video): + with div(cls="video"): + p(key) + insert_video(item) + else: + logger.warning("Unsupported report item type %s", item) + with storage.write("report.html") as filehandle: filehandle.write(doc.render()) diff --git a/vot/document/jquery.js b/vot/report/jquery.js similarity index 100% rename from vot/document/jquery.js rename to vot/report/jquery.js diff --git a/vot/document/latex.py b/vot/report/latex.py similarity index 96% rename from vot/document/latex.py rename to vot/report/latex.py index 0aca8f0..afab59f 100644 --- a/vot/document/latex.py +++ b/vot/report/latex.py @@ -13,8 +13,8 @@ from vot.tracker import Tracker from vot.dataset import Sequence from vot.workspace import Storage -from vot.document.common import format_value, read_resource, merge_repeats -from vot.document import StyleManager, Plot, Table +from vot.report.common import format_value, read_resource, merge_repeats +from vot.report import StyleManager, Plot, Table TRACKER_GROUP = "default" @@ -147,6 +147,8 @@ def make_table(doc, table): logger.debug("Saving plot %s", item.identifier) item.save(key + "_" + item.identifier + '.pdf', "PDF") + else: + logger.warning("Unsupported report item type %s", item) # TODO: Move to separate function #if build: diff --git a/vot/document/pure.css b/vot/report/pure.css similarity index 100% rename from vot/document/pure.css rename to vot/report/pure.css diff --git a/vot/document/report.css b/vot/report/report.css similarity index 100% rename from vot/document/report.css rename to vot/report/report.css diff --git a/vot/document/report.js b/vot/report/report.js similarity index 100% rename from vot/document/report.js rename to vot/report/report.js diff --git a/vot/document/table.js b/vot/report/table.js similarity index 100% rename from vot/document/table.js rename to vot/report/table.js diff --git a/vot/report/tests.py b/vot/report/tests.py new file mode 100644 index 0000000..e69de29 diff --git a/vot/report/videos.py b/vot/report/videos.py new file mode 100644 index 0000000..e90e51a --- /dev/null +++ b/vot/report/videos.py @@ -0,0 +1,46 @@ + + +from typing import List + +from attributee import Boolean + +from vot.dataset import Sequence +from vot.tracker import Tracker +from vot.experiment.multirun import MultiRunExperiment, Experiment +from vot.report import ObjectVideo, SeparableReport + + +class PreviewVideos(SeparableReport): + """A report that generates video previews for the tracker results.""" + + groundtruth = Boolean(default=False, description="If set, the groundtruth is shown with the tracker output.") + + async def perexperiment(self, experiment: Experiment, trackers: List[Tracker], sequences: List[Sequence]): + + videos = [] + + for sequence in sequences: + + for tracker in trackers: + + video = ObjectVideo(sequence.identifier + "_" + tracker.identifier, sequence) + + if self.groundtruth: + for frame in 
range(len(sequence)): + video(frame, "_", sequence.groundtruth(frame)) + + for obj in sequence.objects(): + trajectories = experiment.gather(tracker, sequence, objects=[obj]) + + if len(trajectories) == 0: + continue + + for frame in range(len(sequence)): + video(frame, obj, trajectories[0].region(frame)) + + videos.append(video) + + return videos + + def compatible(self, experiment): + return isinstance(experiment, MultiRunExperiment) \ No newline at end of file diff --git a/vot/utilities/cli.py b/vot/utilities/cli.py index 99fb18b..eb1c999 100644 --- a/vot/utilities/cli.py +++ b/vot/utilities/cli.py @@ -244,7 +244,7 @@ def do_analysis(args: argparse.Namespace): from vot import config from vot.analysis import AnalysisProcessor, process_stack_analyses - from vot.document import generate_serialized + from vot.report import generate_serialized workspace = Workspace.load(args.workspace) @@ -323,7 +323,7 @@ def do_report(config: argparse.Namespace): config (argparse.Namespace): Configuration """ - from vot.document import generate_document + from vot.report import generate_document if config.name is None: name = "{:%Y-%m-%dT%H-%M-%S.%f%z}".format(datetime.now()) diff --git a/vot/workspace/__init__.py b/vot/workspace/__init__.py index 08600d7..80571dc 100644 --- a/vot/workspace/__init__.py +++ b/vot/workspace/__init__.py @@ -14,7 +14,7 @@ from ..tracker import Registry, Tracker from ..stack import Stack, resolve_stack from ..utilities import normalize_path -from ..document import ReportConfiguration +from ..report import ReportConfiguration from .storage import LocalStorage, Storage, NullStorage From 59d9d6a6c769c42f0a7ac2ffbaf1ce735fa52ab5 Mon Sep 17 00:00:00 2001 From: Luka Cehovin Zajc Date: Fri, 1 Dec 2023 09:16:09 +0100 Subject: [PATCH 5/9] Small fixes --- vot/analysis/tests.py | 24 ++++++++++++++++++++++++ vot/dataset/__init__.py | 13 ++++++------- vot/dataset/common.py | 8 +++++++- 3 files changed, 37 insertions(+), 8 deletions(-) create mode 100644 vot/analysis/tests.py diff --git a/vot/analysis/tests.py b/vot/analysis/tests.py new file mode 100644 index 0000000..e6e754e --- /dev/null +++ b/vot/analysis/tests.py @@ -0,0 +1,24 @@ +""" Unit tests for analysis module. """ + + +import unittest + +class Tests(unittest.TestCase): + """ Unit tests for analysis module. 
""" + + def test_perfect_accuracy(self): + import numpy as np + + from vot.region import Rectangle, Special + from vot.analysis.accuracy import gather_overlaps + + trajectory = [Rectangle(0, 0, 100, 100)] * 30 + groundtruth = [Rectangle(0, 0, 100, 100)] * 30 + + trajectory[0] = Special(1) + + overlaps = gather_overlaps(trajectory, groundtruth) + + print(overlaps) + + self.assertEqual(np.mean(overlaps), 1) \ No newline at end of file diff --git a/vot/dataset/__init__.py b/vot/dataset/__init__.py index 960dfc5..210fd3d 100644 --- a/vot/dataset/__init__.py +++ b/vot/dataset/__init__.py @@ -284,8 +284,7 @@ def append(self, image): self._images.append(image) - @property - def length(self) -> int: + def __len__(self) -> int: """Returns the length of the sequence channel in number of frames Returns: @@ -951,7 +950,7 @@ def __init__(self, name, channels): Raises: DatasetException: If images are not provided for all channels """ - super().__init__(name, None) + super().__init__(name) self._channels = {c: InMemoryChannel() for c in channels} self._tags = {} self._values = {} @@ -1014,7 +1013,8 @@ def channels(self) -> List[str]: List[str]: List of channel names """ - return self._channels.keys() + print(self._channels.keys()) + return set(self._channels.keys()) def frame(self, index : int) -> "Frame": """Returns the specified frame. The frame is returned as a Frame object. @@ -1133,15 +1133,14 @@ def size(self) -> tuple: tuple: Sequence size """ return self.channel().size - - @property + def channels(self) -> list: """Returns a list of channel names Returns: list: List of channel names """ - return self._channels.keys() + return set(self._channels.keys()) def download_bundle(url: str, path: str = "."): """Downloads a dataset bundle as a ZIP file and decompresses it. 
diff --git a/vot/dataset/common.py b/vot/dataset/common.py index 439f647..709bc7e 100644 --- a/vot/dataset/common.py +++ b/vot/dataset/common.py @@ -319,5 +319,11 @@ def write_sequence(directory: str, sequence: Sequence): with open(os.path.join(directory, "%s.value" % value), "w") as fp: fp.write(data) - write_trajectory(os.path.join(directory, "groundtruth.txt"), [f.groundtruth() for f in sequence]) + # Write groundtruth in case of single object + if len(sequence.objects()) == 1: + write_trajectory(os.path.join(directory, "groundtruth.txt"), [f.groundtruth() for f in sequence]) + else: + for id in sequence.objects(): + write_trajectory(os.path.join(directory, "groundtruth_%s.txt" % id), [f.object(id) for f in sequence]) + write_properties(os.path.join(directory, "sequence"), metadata) From eab66b33b051db53c91097dba75f7ff46615ce30 Mon Sep 17 00:00:00 2001 From: Luka Cehovin Zajc Date: Mon, 11 Dec 2023 14:32:36 +0100 Subject: [PATCH 6/9] Some basic VOST support --- vot/dataset/__init__.py | 7 +-- vot/dataset/proxy.py | 27 ++++++++++- vot/experiment/transformer.py | 26 ++++++++-- vot/region/raster.py | 90 +++++++++++++++++++---------------- vot/region/tests.py | 12 +++++ vot/utilities/cli.py | 8 +++- 6 files changed, 119 insertions(+), 51 deletions(-) diff --git a/vot/dataset/__init__.py b/vot/dataset/__init__.py index 210fd3d..3376331 100644 --- a/vot/dataset/__init__.py +++ b/vot/dataset/__init__.py @@ -867,11 +867,12 @@ def groundtruth(self, index=None): Returns: Region: Groundtruth region """ - data = self.__preload() - if len(self.objects()) != 1: + objids = self.objects() + + if len(objids) != 1: raise DatasetException("More than one object in sequence") - id = next(iter(data.objects)) + id = next(iter(objids)) return self.object(id, index) def tags(self, index: int = None) -> List[str]: diff --git a/vot/dataset/proxy.py b/vot/dataset/proxy.py index c1bc1d1..36c34e4 100644 --- a/vot/dataset/proxy.py +++ b/vot/dataset/proxy.py @@ -26,7 +26,7 @@ def __len__(self): Returns: int: Length of the sequence. """ - return len(self) + return len(self._source) def frame(self, index: int) -> Frame: """Returns a frame object for the given index. Forwards the request to the source sequence. @@ -384,4 +384,27 @@ def groundtruth(self, index: int = None) -> List[Region]: Args: index (int): Index of the frame. """ - return self._source.object(self._id, index) \ No newline at end of file + return self._source.object(self._id, index) + +class ObjectsHideFilterSequence(ProxySequence): + """A proxy sequence that virtually removes specified objects from the sequence. Note that the object is not removed from the sequence, but only hidden when listing them. + """ + + def __init__(self, source: Sequence, ids: Set[str]): + """Creates an object hide filter proxy sequence. + + Args: + source (Sequence): Source sequence object + ids (Set[str]): IDs of the objects that will be hidden in the proxy sequence. + """ + super().__init__(source) + self._ids = ids + + def objects(self): + """Returns a dictionary of all objects in the sequence. + + Returns: + Dict[str, Object]: Dictionary of all objects in the sequence. 
+        """
+        objects = self._source.objects()
+        return {id for id in objects if id not in self._ids}
diff --git a/vot/experiment/transformer.py b/vot/experiment/transformer.py
index d8a932e..70d5fb4 100644
--- a/vot/experiment/transformer.py
+++ b/vot/experiment/transformer.py
@@ -2,11 +2,11 @@
 
 import os
 from abc import abstractmethod
-from typing import List
+import typing
 
 from PIL import Image
 
-from attributee import Attributee, Integer, Float, Boolean
+from attributee import Attributee, Integer, Float, Boolean, String, List
 
 from vot.dataset import Sequence, InMemorySequence
 from vot.dataset.proxy import FrameMapSequence
@@ -28,7 +28,7 @@ def __init__(self, cache: "LocalStorage", **kwargs):
         self._cache = cache
 
     @abstractmethod
-    def __call__(self, sequence: Sequence) -> List[Sequence]:
+    def __call__(self, sequence: Sequence) -> typing.List[Sequence]:
         """Generate a list of sequences from the given sequence. The generated sequences are stored in the cache if needed.
 
         Args:
@@ -45,7 +45,7 @@ class SingleObject(Transformer):
 
     trim = Boolean(default=False, description="Trim each generated sequence to a visible subsection for the selected object")
 
-    def __call__(self, sequence: Sequence) -> List[Sequence]:
+    def __call__(self, sequence: Sequence) -> typing.List[Sequence]:
         """Generate a list of sequences from the given sequence.
 
        Args:
@@ -69,7 +69,7 @@ class Redetection(Transformer):
     padding = Float(default=2, val_min=0)
     scaling = Float(default=1, val_min=0.1, val_max=10)
 
-    def __call__(self, sequence: Sequence) -> List[Sequence]:
+    def __call__(self, sequence: Sequence) -> typing.List[Sequence]:
         """Generate a list of sequences from the given sequence.
 
         Args:
@@ -110,3 +110,19 @@ def __call__(self, sequence: Sequence) -> List[Sequence]:
         source = read_sequence(chache_dir)
         mapping = [0] * self.initialization + [1] * (len(self) - self.initialization)
         return [FrameMapSequence(source, mapping)]
+
+@transformer_registry.register("ignore")
+class IgnoreObjects(Transformer):
+    """Transformer that hides objects with certain ids from the sequence."""
+
+    ids = List(String(), default=[], description="List of ids to be ignored")
+
+    def __call__(self, sequence: Sequence) -> typing.List[Sequence]:
+        """Generate a list of sequences from the given sequence.
+
+        Args:
+            sequence (Sequence): The sequence to be transformed.
+        """
+        from vot.dataset.proxy import ObjectsHideFilterSequence
+
+        return [ObjectsHideFilterSequence(sequence, self.ids)]
\ No newline at end of file
diff --git a/vot/region/raster.py b/vot/region/raster.py
index 8018cf9..f774195 100644
--- a/vot/region/raster.py
+++ b/vot/region/raster.py
@@ -279,7 +279,7 @@ def _region_raster(a: np.ndarray, bounds: Tuple[int, int, int, int], t: int, o:
 
 @numba.njit(cache=True)
 def _calculate_overlap(a: np.ndarray, b: np.ndarray, at: int, bt: int, ao: Optional[Tuple[int, int]] = None,
-                    bo: Optional[Tuple[int, int]] = None, bounds: Optional[Tuple[int, int]] = None):
+                    bo: Optional[Tuple[int, int]] = None, bounds: Optional[Tuple[int, int]] = None, ignore: Optional[np.ndarray] = None, it: Optional[int] = None, io: Optional[Tuple[int, int]] = None):
     """ Calculate the overlap between two regions. This is a Numba implementation of the function that is compiled to machine code for faster execution.
     Args:
@@ -322,11 +322,21 @@ def _calculate_overlap(a: np.ndarray, b: np.ndarray, at: int, bt: int, ao: Optio
 
     intersection = 0
     union_ = 0
 
-    for i in range(a1.size):
-        if a1[i] != 0 or a2[i] != 0:
-            union_ += 1
-        if a1[i] != 0 and a2[i] != 0:
-            intersection += 1
+    if ignore is not None and it != _TYPE_EMPTY:
+        m3 = _region_raster(ignore, raster_bounds, it, io)
+        a3 = m3.ravel()
+        for i in range(a1.size):
+            if a3[i] != 0:
+                if a1[i] != 0 or a2[i] != 0:
+                    union_ += 1
+                if a1[i] != 0 and a2[i] != 0:
+                    intersection += 1
+    else:
+        for i in range(a1.size):
+            if a1[i] != 0 or a2[i] != 0:
+                union_ += 1
+            if a1[i] != 0 and a2[i] != 0:
+                intersection += 1
 
     return float(intersection) / float(union_) if union_ > 0 else float(0)
 
@@ -335,62 +345,57 @@ def _calculate_overlap(a: np.ndarray, b: np.ndarray, at: int, bt: int, ao: Optio
 
 Bounds = Tuple[int, int]
 
-def calculate_overlap(reg1: Shape, reg2: Shape, bounds: Optional[Bounds] = None):
+def _infer_meta(reg: Region):
+    if isinstance(reg, Rectangle):
+        data1 = np.round(reg._data)
+        offset1 = (0, 0)
+        type1 = _TYPE_RECTANGLE
+    elif isinstance(reg, Polygon):
+        data1 = np.round(reg._points)
+        offset1 = (0, 0)
+        type1 = _TYPE_POLYGON
+    elif isinstance(reg, Mask):
+        data1 = reg.mask
+        offset1 = reg.offset
+        type1 = _TYPE_MASK
+    else:
+        data1 = np.zeros((1, 1))
+        offset1 = (0, 0)
+        type1 = _TYPE_EMPTY
+
+    return data1, offset1, type1
+
+def calculate_overlap(reg1: Shape, reg2: Shape, bounds: Optional[Bounds] = None, ignore: Optional[Shape] = None):
     """ Calculate the overlap between two regions. The function first rasterizes both regions to 2-D binary masks and calculates overlap between them
 
     Args:
         reg1: first region
         reg2: second region
         bounds: 2-tuple with the bounds of the image (width, height)
+        ignore: region to ignore when calculating overlap, usually a mask
 
     Returns:
        float with the overlap between the two regions. Note that overlap is one by definition if both regions are empty.
 
     """
-    if isinstance(reg1, Rectangle):
-        data1 = np.round(reg1._data)
-        offset1 = (0, 0)
-        type1 = _TYPE_RECTANGLE
-    elif isinstance(reg1, Polygon):
-        data1 = np.round(reg1._points)
-        offset1 = (0, 0)
-        type1 = _TYPE_POLYGON
-    elif isinstance(reg1, Mask):
-        data1 = reg1.mask
-        offset1 = reg1.offset
-        type1 = _TYPE_MASK
-    else:
-        data1 = np.zeros((1, 1))
-        offset1 = (0, 0)
-        type1 = _TYPE_EMPTY
+    data1, offset1, type1 = _infer_meta(reg1)
+    data2, offset2, type2 = _infer_meta(reg2)
 
-    if isinstance(reg2, Rectangle):
-        data2 = np.round(reg2._data)
-        offset2 = (0, 0)
-        type2 = _TYPE_RECTANGLE
-    elif isinstance(reg2, Polygon):
-        data2 = np.round(reg2._points)
-        offset2 = (0, 0)
-        type2 = _TYPE_POLYGON
-    elif isinstance(reg2, Mask):
-        data2 = reg2.mask
-        offset2 = reg2.offset
-        type2 = _TYPE_MASK
-    else:
-        data2 = np.zeros((1, 1))
-        offset2 = (0, 0)
-        type2 = _TYPE_EMPTY
+    if ignore is not None:
+        ignore_data, ignore_offset, ignore_type = _infer_meta(ignore)
+        return _calculate_overlap(data1, data2, type1, type2, offset1, offset2, bounds, ignore_data, ignore_type, ignore_offset)
 
     return _calculate_overlap(data1, data2, type1, type2, offset1, offset2, bounds)
 
-def calculate_overlaps(first: List[Region], second: List[Region], bounds: Optional[Bounds] = None):
+def calculate_overlaps(first: List[Region], second: List[Region], bounds: Optional[Bounds] = None, ignore: Optional[List[Region]] = None):
     """ Calculate the overlap between two lists of regions.
     The function first rasterizes both regions to 2-D binary masks and calculates overlap between them
 
     Args:
         first: first list of regions
         second: second list of regions
         bounds: 2-tuple with the bounds of the image (width, height)
+        ignore: list of regions to ignore when calculating overlap, usually a list of masks
 
     Returns:
        list of floats with the overlap between the two regions. Note that overlap is one by definition if both regions are empty.
 
@@ -400,4 +405,9 @@ def calculate_overlaps(first: List[Region], second: List[Region], bounds: Option
     """
     if not len(first) == len(second):
         raise RegionException("List not of the same size {} != {}".format(len(first), len(second)))
+
+    if ignore is not None:
+        if not len(first) == len(ignore):
+            raise RegionException("Ignore list not of the same size {} != {}".format(len(first), len(ignore)))
+        return [calculate_overlap(pairs[0], pairs[1], bounds=bounds, ignore=ignore[i]) for i, pairs in enumerate(zip(first, second))]
     return [calculate_overlap(pairs[0], pairs[1], bounds=bounds) for i, pairs in enumerate(zip(first, second))]
diff --git a/vot/region/tests.py b/vot/region/tests.py
index b1a2ce9..d85c773 100644
--- a/vot/region/tests.py
+++ b/vot/region/tests.py
@@ -34,6 +34,18 @@ def test_calculate_overlap(self):
         r1 = Rectangle(0, 0, 0, 0)
         self.assertEqual(calculate_overlap(r1, r1), 1)
 
+    def test_ignore_mask(self):
+        """Tests that the ignore mask is handled correctly."""
+        from vot.region import Mask
+
+        r1 = Mask(np.ones((100, 100), dtype=np.uint8))
+        r2 = Mask(np.ones((100, 100), dtype=np.uint8))
+        ignore = Mask(np.zeros((100, 100), dtype=np.uint8))
+        self.assertEqual(calculate_overlap(r1, r2, ignore=ignore), 0)
+
+        ignore = Mask(np.ones((100, 100), dtype=np.uint8))
+        self.assertEqual(calculate_overlap(r1, r2, ignore=ignore), 1)
+
     def test_empty_mask(self):
         """Tests if the empty mask is correctly detected."""
         from vot.region import Mask
diff --git a/vot/utilities/cli.py b/vot/utilities/cli.py
index eb1c999..dfa5a67 100644
--- a/vot/utilities/cli.py
+++ b/vot/utilities/cli.py
@@ -48,6 +48,7 @@ def do_test(config: argparse.Namespace):
     from vot.dataset import load_sequence, Frame
     from vot.tracker import ObjectStatus
     from vot.experiment.helpers import MultiObjectHelper
+    from vot.dataset.proxy import ObjectsHideFilterSequence
 
     trackers = Registry(config.registry)
 
@@ -93,6 +94,9 @@ def visualize(axes, frame: Frame, reference, state):
     else:
         sequence = load_sequence(normalize_path(config.sequence))
 
+    if config.ignore:
+        sequence = ObjectsHideFilterSequence(sequence, config.ignore)
+
     logger.info("Obtaining runtime for tracker %s", tracker.identifier)
 
     context = {"continue" : True}
@@ -110,7 +114,8 @@ def on_press(event):
         import matplotlib.pylab as plt
         from vot.utilities.draw import MatplotlibDrawHandle
         figure = plt.figure()
-        figure.canvas.set_window_title('VOT Test')
+        if hasattr(figure.canvas, "set_window_title"):
+            figure.canvas.set_window_title('VOT Test')
         axes = figure.add_subplot(1, 1, 1)
         axes.set_aspect("equal")
         handle = MatplotlibDrawHandle(axes, size=sequence.size)
@@ -439,6 +444,7 @@ def main():
     test_parser.add_argument("tracker", help='Tracker identifier', nargs="?")
     test_parser.add_argument("--visualize", "-g", default=False, required=False, help='Visualize results of the test session', action='store_true')
     test_parser.add_argument("--sequence", "-s", required=False, help='Path to sequence to use instead of dummy')
+    test_parser.add_argument("--ignore", required=False, help='Comma-separated list of object IDs to ignore', type=lambda x: x.split(","), default=[])
 
     workspace_parser = subparsers.add_parser('initialize', help='Setup a new workspace and download data')
     workspace_parser.add_argument("--workspace", default=os.getcwd(), help='Workspace path')

From 7b3c444350fb71d3a23b50faa5cdc3cbf195cf25 Mon Sep 17 00:00:00 2001
From: Luka Cehovin Zajc
Date: Wed, 24 Jan 2024 13:19:19 +0000
Subject: [PATCH 7/9] Fixing realtime experiments

---
 vot/tracker/__init__.py | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/vot/tracker/__init__.py b/vot/tracker/__init__.py
index eb6a468..5c3f377 100644
--- a/vot/tracker/__init__.py
+++ b/vot/tracker/__init__.py
@@ -604,7 +604,7 @@ def __init__(self, runtime: TrackerRuntime, grace: int = 1, interval: float = 0.
         self._interval = interval
         self._countdown = 0
         self._time = 0
-        self._out = None
+        self._status = None
 
     @property
     def multiobject(self):
@@ -615,13 +615,13 @@ def stop(self):
         """Stops the tracker runtime."""
         self._runtime.stop()
         self._time = 0
-        self._out = None
+        self._status = None
 
     def restart(self):
         """Restarts the tracker runtime, usually starts a new process."""
         self._runtime.restart()
         self._time = 0
-        self._out = None
+        self._status = None
 
     def initialize(self, frame: Frame, new: Objects = None, properties: dict = None) -> Tuple[Objects, float]:
         """Initializes the tracker runtime with specified frame and objects. Returns the initial objects and the time it took to initialize the tracker.
@@ -635,9 +635,9 @@ def initialize(self, frame: Frame, new: Objects = None, properties: dict = None)
             Tuple[Objects, float] -- The initial objects and the time it took to initialize the tracker.
         """
         self._countdown = self._grace
-        self._out = None
+        self._status = None
 
-        out, prop, time = self._runtime.initialize(frame, new, properties)
+        status, time = self._runtime.initialize(frame, new, properties)
 
         if time > self._interval:
             if self._countdown > 0:
@@ -645,11 +645,11 @@ def initialize(self, frame: Frame, new: Objects = None, properties: dict = None)
                 self._time = 0
             else:
                 self._time = time - self._interval
-                self._out = out
+                self._status = status
         else:
             self._time = 0
 
-        return out, prop, time
+        return status, time
 
     def update(self, frame: Frame, _: Objects = None, properties: dict = None) -> Tuple[Objects, float]:
 
@@ -666,12 +666,12 @@ def update(self, frame: Frame, _: Objects = None, properties: dict = None) -> Tu
 
         if self._time > self._interval:
             self._time = self._time - self._interval
-            return self._out, dict(), 0
+            return self._status, 0
         else:
-            self._out = None
+            self._status = None
             self._time = 0
 
-        out, prop, time = self._runtime.update(frame, properties)
+        status, time = self._runtime.update(frame, properties)
 
         if time > self._interval:
             if self._countdown > 0:
@@ -679,9 +679,9 @@ def update(self, frame: Frame, _: Objects = None, properties: dict = None) -> Tu
                 self._time = 0
             else:
                 self._time = time - self._interval
-                self._out = out
+                self._status = status
 
-        return out, prop, time
+        return status, time
 
 
 class PropertyInjectorTrackerRuntime(TrackerRuntime):
@@ -810,7 +810,10 @@ def update(self, frame: Frame, new: Objects = None, properties: dict = None) ->
             Tuple[Objects, float] -- The updated objects and the time it took to update the tracker.
""" - if not new is None: raise TrackerException("Only supports single object tracking", tracker=self.tracker) + if not new is None and isinstance(new, list) and len(new) != 0: + raise TrackerException("Only supports single object tracking", tracker=self.tracker) + if new is None: + new = [] status, time = self._runtime.update(frame, new, properties) if isinstance(status, list): status = status[0] return status, time From afe26425768487a34f2f2b76d2d6665cad3aa457 Mon Sep 17 00:00:00 2001 From: Luka Cehovin Zajc Date: Wed, 24 Jan 2024 13:19:43 +0000 Subject: [PATCH 8/9] Fixing reinitialization test experiments - longterm --- vot/experiment/transformer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vot/experiment/transformer.py b/vot/experiment/transformer.py index 70d5fb4..be01e68 100644 --- a/vot/experiment/transformer.py +++ b/vot/experiment/transformer.py @@ -108,7 +108,7 @@ def __call__(self, sequence: Sequence) -> typing.List[Sequence]: write_sequence(chache_dir, generated) source = read_sequence(chache_dir) - mapping = [0] * self.initialization + [1] * (len(self) - self.initialization) + mapping = [0] * self.initialization + [1] * (len(source) - self.initialization) return [FrameMapSequence(source, mapping)] @transformer_registry.register("ignore") From ce5dda8dbde496ac8fefa66ae0821e3b44472d14 Mon Sep 17 00:00:00 2001 From: Luka Cehovin Zajc Date: Wed, 24 Jan 2024 13:20:12 +0000 Subject: [PATCH 9/9] Renaming stacks to a more uniform format --- vot/stack/vot2021/{lt.yaml => longterm.yaml} | 0 vot/stack/vot2021/{st.yaml => shortterm.yaml} | 0 vot/stack/vot2022/{lt.yaml => longterm.yaml} | 0 vot/stack/vot2022/{sts.yaml => shortterm.yaml} | 0 vot/stack/vot2022/{stb.yaml => shorttermbox.yaml} | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename vot/stack/vot2021/{lt.yaml => longterm.yaml} (100%) rename vot/stack/vot2021/{st.yaml => shortterm.yaml} (100%) rename vot/stack/vot2022/{lt.yaml => longterm.yaml} (100%) rename vot/stack/vot2022/{sts.yaml => shortterm.yaml} (100%) rename vot/stack/vot2022/{stb.yaml => shorttermbox.yaml} (100%) diff --git a/vot/stack/vot2021/lt.yaml b/vot/stack/vot2021/longterm.yaml similarity index 100% rename from vot/stack/vot2021/lt.yaml rename to vot/stack/vot2021/longterm.yaml diff --git a/vot/stack/vot2021/st.yaml b/vot/stack/vot2021/shortterm.yaml similarity index 100% rename from vot/stack/vot2021/st.yaml rename to vot/stack/vot2021/shortterm.yaml diff --git a/vot/stack/vot2022/lt.yaml b/vot/stack/vot2022/longterm.yaml similarity index 100% rename from vot/stack/vot2022/lt.yaml rename to vot/stack/vot2022/longterm.yaml diff --git a/vot/stack/vot2022/sts.yaml b/vot/stack/vot2022/shortterm.yaml similarity index 100% rename from vot/stack/vot2022/sts.yaml rename to vot/stack/vot2022/shortterm.yaml diff --git a/vot/stack/vot2022/stb.yaml b/vot/stack/vot2022/shorttermbox.yaml similarity index 100% rename from vot/stack/vot2022/stb.yaml rename to vot/stack/vot2022/shorttermbox.yaml