diff --git a/app/grandchallenge/algorithms/templates/algorithms/algorithmmodel_detail.html b/app/grandchallenge/algorithms/templates/algorithms/algorithmmodel_detail.html
new file mode 100644
index 0000000000..602be32203
--- /dev/null
+++ b/app/grandchallenge/algorithms/templates/algorithms/algorithmmodel_detail.html
@@ -0,0 +1,89 @@
+{% extends "base.html" %}
+{% load static %}
+{% load crispy_forms_tags %}
+{% load url %}
+{% load guardian_tags %}
+{% load humanize %}
+{% load user_profile_link from profiles %}
+{% load naturaldelta %}
+
+{% block title %}
+ Algorithm Model - {{ block.super }}
+{% endblock %}
+
+{% block breadcrumbs %}
+    <ol class="breadcrumb">
+        <li class="breadcrumb-item"><a
+                href="{% url 'algorithms:list' %}">Algorithms</a></li>
+        <li class="breadcrumb-item"><a
+                href="{{ object.algorithm.get_absolute_url }}">{{ object.algorithm.title }}</a></li>
+        <li class="breadcrumb-item active"
+            aria-current="page">{{ object }}</li>
+    </ol>
+{% endblock %}
+
+{% block content %}
+    <h2>Algorithm Model</h2>
+
+    {% get_obj_perms request.user for object as "algorithm_model_perms" %}
+    {% get_obj_perms request.user for object.algorithm as "algorithm_perms" %}
+
+    <p>
+        {% if object.is_desired_version %}
+            <span class="badge badge-success">Active model for this algorithm</span>
+        {% else %}
+            <span class="badge badge-danger">Inactive</span>
+        {% endif %}
+    </p>
+
+    <dl class="inline">
+        <dt>ID</dt>
+        <dd>{{ object.pk }}</dd>
+
+        <dt>Algorithm</dt>
+        <dd>
+            <a href="{{ object.algorithm.get_absolute_url }}">{{ object.algorithm.title }}</a>
+        </dd>
+
+        <dt>Creator</dt>
+        <dd>
+            {{ object.creator|user_profile_link }}
+        </dd>
+
+        <dt>Created</dt>
+        <dd>{{ object.created }}</dd>
+
+        {% if object.model %}
+            <dt>Model</dt>
+            <dd>{{ object.model.name }}</dd>
+
+            <dt>Model Size</dt>
+            <dd>{{ object.model.size|naturalsize }}</dd>
+        {% endif %}
+
+        {% if object.sha256 %}
+            <dt>SHA256</dt>
+            <dd>{{ object.sha256 }}</dd>
+        {% endif %}
+
+        <dt>Import Status</dt>
+        <dd>
+            {% if object.import_in_progress %}
+                <i class="fa fa-spinner fa-spin"></i>
+            {% endif %}
+            {{ object.get_import_status_display }}
+        </dd>
+
+        {% if object.status %}
+            <dt>Validation Errors</dt>
+            <dd>{{ object.status }}</dd>
+        {% endif %}
+
+        <dt>Comment</dt>
+        <dd>{{ object.comment }}</dd>
+    </dl>
+
+{% endblock %}
diff --git a/app/grandchallenge/algorithms/templates/algorithms/algorithmmodel_form.html b/app/grandchallenge/algorithms/templates/algorithms/algorithmmodel_form.html
new file mode 100644
index 0000000000..a4b4d146d6
--- /dev/null
+++ b/app/grandchallenge/algorithms/templates/algorithms/algorithmmodel_form.html
@@ -0,0 +1,32 @@
+{% extends "base.html" %}
+{% load crispy_forms_tags %}
+{% load url %}
+
+{% block title %}
+ Create An Algorithm Model - {{ block.super }}
+{% endblock %}
+
+{% block breadcrumbs %}
+    <ol class="breadcrumb">
+        <li class="breadcrumb-item"><a
+                href="{% url 'algorithms:list' %}">Algorithms</a></li>
+        <li class="breadcrumb-item"><a
+                href="{{ algorithm.get_absolute_url }}">{{ algorithm.title }}</a></li>
+        <li class="breadcrumb-item active"
+            aria-current="page">Create model</li>
+    </ol>
+{% endblock %}
+
+{% block content %}
+    <h2>Create An Algorithm Model</h2>
+
+    <p>
+        Upload a model that will be extracted to <code>/opt/ml/model/</code>
+        during inference.
+    </p>
+
+    {% crispy form %}
+
+{% endblock %}
diff --git a/app/grandchallenge/algorithms/templates/algorithms/job_detail.html b/app/grandchallenge/algorithms/templates/algorithms/job_detail.html
index fcb348cf04..76602df168 100644
--- a/app/grandchallenge/algorithms/templates/algorithms/job_detail.html
+++ b/app/grandchallenge/algorithms/templates/algorithms/job_detail.html
@@ -133,6 +133,9 @@
Result Reference Data
Algorithm Version
{{ object.algorithm_image.pk }}
+            <dt>Model Version</dt>
+            <dd>{% if object.algorithm_model %}{{ object.algorithm_model.pk }}{% else %}None{% endif %}</dd>
+
Creator
{{ object.creator|user_profile_link }}
diff --git a/app/grandchallenge/algorithms/urls.py b/app/grandchallenge/algorithms/urls.py
index a77024eb61..19fee279ae 100644
--- a/app/grandchallenge/algorithms/urls.py
+++ b/app/grandchallenge/algorithms/urls.py
@@ -10,6 +10,8 @@
AlgorithmImageUpdate,
AlgorithmImportView,
AlgorithmList,
+ AlgorithmModelCreate,
+ AlgorithmModelDetail,
AlgorithmPermissionRequestCreate,
AlgorithmPermissionRequestList,
AlgorithmPermissionRequestUpdate,
@@ -66,6 +68,16 @@
AlgorithmImageUpdate.as_view(),
name="image-update",
),
+    path(
+        "<slug>/models/<uuid:pk>/",
+        AlgorithmModelDetail.as_view(),
+        name="model-detail",
+    ),
+    path(
+        "<slug>/models/create/",
+        AlgorithmModelCreate.as_view(),
+        name="model-create",
+    ),
     path("<slug>/jobs/", JobsList.as_view(), name="job-list"),
     path("<slug>/jobs/create/", JobCreate.as_view(), name="job-create"),
     path("<slug>/jobs/<uuid:pk>/", JobDetail.as_view(), name="job-detail"),
diff --git a/app/grandchallenge/algorithms/views.py b/app/grandchallenge/algorithms/views.py
index ddc957dd7d..b6e0e6e8fc 100644
--- a/app/grandchallenge/algorithms/views.py
+++ b/app/grandchallenge/algorithms/views.py
@@ -9,7 +9,7 @@
from django.contrib.messages.views import SuccessMessageMixin
from django.core.cache import cache
from django.core.exceptions import PermissionDenied, ValidationError
-from django.db.models import OuterRef, Subquery, Window
+from django.db.models import Count, OuterRef, Q, Subquery, Window
from django.db.models.functions import Rank
from django.forms.utils import ErrorList
from django.http import HttpResponse, HttpResponseRedirect
@@ -43,6 +43,7 @@
AlgorithmImageForm,
AlgorithmImageUpdateForm,
AlgorithmImportForm,
+ AlgorithmModelForm,
AlgorithmPermissionRequestUpdateForm,
AlgorithmPublishForm,
AlgorithmRepoForm,
@@ -57,6 +58,7 @@
from grandchallenge.algorithms.models import (
Algorithm,
AlgorithmImage,
+ AlgorithmModel,
AlgorithmPermissionRequest,
Job,
)
@@ -500,7 +502,8 @@ def get_context_data(self, *args, **kwargs):
)
return context
- def form_valid(self, form):
+    def form_valid(self, form):  # noqa: C901
+        # TODO this should all be in the form's save method, not in the view
def create_upload_session(image_files):
upload_session = RawImageUploadSession.objects.create(
creator=self.request.user
@@ -514,7 +517,7 @@ def create_upload_session(image_files):
interfaces = {ci.slug: ci for ci in self.algorithm.inputs.all()}
for slug, value in form.cleaned_data.items():
- if slug == "algorithm_image":
+ if slug in ["algorithm_image", "algorithm_model"]:
continue
ci = interfaces[slug]
@@ -557,21 +560,50 @@ def create_upload_session(image_files):
civ = ci.create_instance(value=value)
component_interface_values.append(civ)
- job = Job.objects.create(
- creator=self.request.user,
- algorithm_image=form.cleaned_data["algorithm_image"],
- extra_logs_viewer_groups=[self.algorithm.editors_group],
- input_civ_set=component_interface_values,
- time_limit=self.algorithm.time_limit,
- )
- job.sort_inputs_and_execute(upload_session_pks=upload_session_pks)
+ # check that this job hasn't been run yet:
+ unique_kwargs = {
+ "algorithm_image": form.cleaned_data["algorithm_image"],
+ }
+ input_interface_count = self.algorithm.inputs.count()
+ if form.cleaned_data["algorithm_model"]:
+ unique_kwargs["algorithm_model"] = form.cleaned_data[
+ "algorithm_model"
+ ]
+
+ if (
+ Job.objects.filter(**unique_kwargs)
+ .annotate(
+ inputs_match_count=Count(
+ "inputs",
+ filter=Q(inputs__in=component_interface_values),
+ )
+ )
+ .filter(inputs_match_count=input_interface_count)
+ .exists()
+ ):
+ form.add_error(
+ None,
+ "A result for these inputs with the current image "
+ "and model already exists.",
+ )
+ return self.form_invalid(form)
+ else:
+ job = Job.objects.create(
+ creator=self.request.user,
+ algorithm_image=form.cleaned_data["algorithm_image"],
+ algorithm_model=form.cleaned_data["algorithm_model"],
+ extra_logs_viewer_groups=[self.algorithm.editors_group],
+ input_civ_set=component_interface_values,
+ time_limit=self.algorithm.time_limit,
+ )
+ job.sort_inputs_and_execute(upload_session_pks=upload_session_pks)
- return HttpResponseRedirect(
- reverse(
- "algorithms:job-progress-detail",
- kwargs={"slug": self.kwargs["slug"], "pk": job.pk},
+ return HttpResponseRedirect(
+ reverse(
+ "algorithms:job-progress-detail",
+ kwargs={"slug": self.kwargs["slug"], "pk": job.pk},
+ )
             )
- )
class JobProgressDetail(
@@ -1021,3 +1053,45 @@ def form_valid(self, form):
self.success_url = form.algorithm.get_absolute_url()
return super().form_valid(form=form)
+
+
+class AlgorithmModelCreate(
+ LoginRequiredMixin,
+ VerificationRequiredMixin,
+ UserFormKwargsMixin,
+ ObjectPermissionRequiredMixin,
+ SuccessMessageMixin,
+ CreateView,
+):
+ model = AlgorithmModel
+ form_class = AlgorithmModelForm
+ permission_required = "algorithms.change_algorithm"
+ raise_exception = True
+ success_message = "Model validation and upload in progress."
+
+ @property
+ def algorithm(self):
+ return get_object_or_404(Algorithm, slug=self.kwargs["slug"])
+
+ def get_form_kwargs(self):
+ kwargs = super().get_form_kwargs()
+ kwargs.update({"algorithm": self.algorithm})
+ return kwargs
+
+ def get_permission_object(self):
+ return self.algorithm
+
+ def get_context_data(self, *args, **kwargs):
+ context = super().get_context_data(*args, **kwargs)
+ context.update({"algorithm": self.algorithm})
+ return context
+
+
+class AlgorithmModelDetail(
+ LoginRequiredMixin,
+ ObjectPermissionRequiredMixin,
+ DetailView,
+):
+ model = AlgorithmModel
+ permission_required = "algorithms.view_algorithmmodel"
+ raise_exception = True
diff --git a/app/grandchallenge/components/backends/amazon_sagemaker_training.py b/app/grandchallenge/components/backends/amazon_sagemaker_training.py
index 5954ea0daf..0242c59efe 100644
--- a/app/grandchallenge/components/backends/amazon_sagemaker_training.py
+++ b/app/grandchallenge/components/backends/amazon_sagemaker_training.py
@@ -70,8 +70,6 @@ def _create_job_boto(self):
},
Environment={
**self.invocation_environment,
- # https://docs.aws.amazon.com/sagemaker/latest/dg/model-train-storage.html#model-train-storage-env-var-summary
- "GRAND_CHALLENGE_COMPONENT_WRITABLE_DIRECTORIES": "/opt/ml/output/data:/opt/ml/model:/opt/ml/checkpoints:/tmp",
},
VpcConfig={
"SecurityGroupIds": [
diff --git a/app/grandchallenge/components/backends/base.py b/app/grandchallenge/components/backends/base.py
index 1c7c71a3c0..b34e1f7b5f 100644
--- a/app/grandchallenge/components/backends/base.py
+++ b/app/grandchallenge/components/backends/base.py
@@ -49,6 +49,7 @@ def __init__(
time_limit: int,
requires_gpu: bool,
desired_gpu_type: GPUTypeChoices,
+ algorithm_model=None,
**kwargs,
):
super().__init__(*args, **kwargs)
@@ -61,11 +62,13 @@ def __init__(
self._stdout = []
self._stderr = []
self.__s3_client = None
+ self._algorithm_model = algorithm_model
def provision(self, *, input_civs, input_prefixes):
self._provision_inputs(
input_civs=input_civs, input_prefixes=input_prefixes
)
+        self._provision_auxiliary_data()
@abstractmethod
def execute(self, *, input_civs, input_prefixes): ...
@@ -101,6 +104,10 @@ def deprovision(self):
bucket=settings.COMPONENTS_OUTPUT_BUCKET_NAME,
prefix=self._io_prefix,
)
+ self._delete_objects(
+ bucket=settings.COMPONENTS_INPUT_BUCKET_NAME,
+ prefix=self._auxiliary_data_prefix,
+ )
@staticmethod
@abstractmethod
@@ -133,11 +140,18 @@ def runtime_metrics(self): ...
@property
def invocation_environment(self):
- return { # Up to 16 pairs
+ env = { # Up to 16 pairs
"LOG_LEVEL": "INFO",
"PYTHONUNBUFFERED": "1",
"no_proxy": "amazonaws.com",
+ "GRAND_CHALLENGE_COMPONENT_WRITABLE_DIRECTORIES": "/opt/ml/output/data:/opt/ml/model:/opt/ml/checkpoints:/tmp",
+ "GRAND_CHALLENGE_COMPONENT_POST_CLEAN_DIRECTORIES": "/opt/ml/output/data:/opt/ml/model",
}
+ if self._algorithm_model:
+ env["GRAND_CHALLENGE_COMPONENT_MODEL"] = (
+ f"s3://{settings.COMPONENTS_INPUT_BUCKET_NAME}/{self._algorithm_model_key}"
+ )
+ return env
@property
def compute_cost_euro_millicents(self):
@@ -175,6 +189,14 @@ def _s3_client(self):
)
return self.__s3_client
+ @property
+ def _auxiliary_data_prefix(self):
+ return safe_join("/auxiliary-data", *self.job_path_parts)
+
+ @property
+ def _algorithm_model_key(self):
+ return safe_join(self._auxiliary_data_prefix, "algorithm-model.tar.gz")
+
def _get_key_and_relative_path(self, *, civ, input_prefixes):
if str(civ.pk) in input_prefixes:
key = safe_join(
@@ -229,6 +251,12 @@ def _provision_inputs(self, *, input_civs, input_prefixes):
Key=key,
)
+    def _provision_auxiliary_data(self):
+ if self._algorithm_model:
+ self._copy_input_file(
+ src=self._algorithm_model, dest_key=self._algorithm_model_key
+ )
+
def _copy_input_file(self, *, src, dest_key):
self._s3_client.copy(
CopySource={"Bucket": src.storage.bucket.name, "Key": src.name},
@@ -369,6 +397,7 @@ def _delete_objects(self, *, bucket, prefix):
prefix.startswith("/io/")
or prefix.startswith("/invocations/")
or prefix.startswith("/training-outputs/")
+ or prefix.startswith("/auxiliary-data/")
) or bucket not in {
settings.COMPONENTS_OUTPUT_BUCKET_NAME,
settings.COMPONENTS_INPUT_BUCKET_NAME,
diff --git a/app/grandchallenge/core/storage.py b/app/grandchallenge/core/storage.py
index 390e59d7bf..484af53f63 100644
--- a/app/grandchallenge/core/storage.py
+++ b/app/grandchallenge/core/storage.py
@@ -165,7 +165,7 @@ def copy_s3_object(
target_key = to_field.storage.get_available_name(
name=target_key, max_length=to_field.field.max_length
)
- extra_args = {"ContentType": mimetype}
+ extra_args = {"ContentType": mimetype, "ChecksumAlgorithm": "SHA256"}
if settings.AWS_S3_OBJECT_PARAMETERS[
"StorageClass"
diff --git a/app/grandchallenge/evaluation/forms.py b/app/grandchallenge/evaluation/forms.py
index 4606799d8a..3eb50a92c3 100644
--- a/app/grandchallenge/evaluation/forms.py
+++ b/app/grandchallenge/evaluation/forms.py
@@ -6,6 +6,7 @@
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist, ValidationError
+from django.db.models import Exists, OuterRef
from django.forms import (
CheckboxInput,
CheckboxSelectMultiple,
@@ -264,6 +265,11 @@ class Meta:
)
+class AlgorithmChoiceField(ModelChoiceField):
+ def label_from_instance(self, obj):
+ return obj.form_field_label()
+
+
submission_fields = (
"creator",
"phase",
@@ -272,6 +278,7 @@ class Meta:
"supplementary_url",
"user_upload",
"algorithm_image",
+ "algorithm_model",
)
@@ -293,6 +300,7 @@ class SubmissionForm(
label="Predictions File",
queryset=None,
)
+ algorithm = AlgorithmChoiceField(queryset=None)
def __init__(self, *args, user, phase: Phase, **kwargs): # noqa: C901
super().__init__(*args, user=user, phase=phase, **kwargs)
@@ -342,19 +350,51 @@ def __init__(self, *args, user, phase: Phase, **kwargs): # noqa: C901
if self._phase.submission_kind == SubmissionKindChoices.ALGORITHM:
del self.fields["user_upload"]
- qs = self.user_active_images_for_phase.order_by("algorithm__title")
+ qs = self.user_algorithms_for_phase.filter(
+ has_active_image=True
+ ).order_by("title")
if self._phase.parent:
- qs = qs.filter(
+ eval_with_active_image_and_model = Evaluation.objects.filter(
submission__phase=self._phase.parent,
- submission__evaluation__status=Evaluation.SUCCESS,
- job__status=Job.SUCCESS,
- ).distinct()
- self.fields["algorithm_image"].queryset = qs
+ status=Evaluation.SUCCESS,
+ submission__algorithm_image__pk=OuterRef(
+ "active_image_pk"
+ ),
+ submission__algorithm_model__pk=OuterRef(
+ "active_model_pk"
+ ),
+ )
+ job_with_active_image_and_model = Job.objects.filter(
+ status=Job.SUCCESS,
+ algorithm_image=OuterRef("active_image_pk"),
+ algorithm_model=OuterRef("active_model_pk"),
+ )
+
+ qs = (
+ qs.annotate(
+ has_successful_job=Exists(
+ job_with_active_image_and_model
+ ),
+ has_successful_eval=Exists(
+ eval_with_active_image_and_model
+ ),
+ )
+ .filter(
+ has_successful_eval=True,
+ has_successful_job=True,
+ )
+ .distinct()
+ )
+
+ self.fields["algorithm"].queryset = qs
+ self.fields["algorithm_image"].widget = HiddenInput()
+ self.fields["algorithm_image"].required = False
+ self.fields["algorithm_model"].widget = HiddenInput()
self._algorithm_inputs = self._phase.algorithm_inputs.all()
self._algorithm_outputs = self._phase.algorithm_outputs.all()
- self.fields["algorithm_image"].help_text = format_lazy(
- "Select one of your algorithms' active images to submit as a solution to this "
+ self.fields["algorithm"].help_text = format_lazy(
+ "Select one of your algorithms to submit as a solution to this "
"challenge. The algorithms need to work with the following inputs: {} "
"and the following outputs: {}. If you have not created your "
"algorithm yet you can "
@@ -370,7 +410,9 @@ def __init__(self, *args, user, phase: Phase, **kwargs): # noqa: C901
),
)
else:
+ del self.fields["algorithm"]
del self.fields["algorithm_image"]
+ del self.fields["algorithm_model"]
self.fields["user_upload"].queryset = get_objects_for_user(
user,
@@ -389,21 +431,33 @@ def clean_phase(self):
)
return phase
- def clean_algorithm_image(self):
- algorithm_image = self.cleaned_data["algorithm_image"]
+ def clean_algorithm(self):
+ algorithm = self.cleaned_data["algorithm"]
+
+ extra_submission_filter = {}
+ extra_evaluation_filter = {}
+ if algorithm.active_model:
+ extra_submission_filter = {
+ "algorithm_model__sha256": algorithm.active_model.sha256
+ }
+ extra_evaluation_filter = {
+ "submission__algorithm_model__sha256": algorithm.active_model.sha256
+ }
if Submission.objects.filter(
- algorithm_image__image_sha256=algorithm_image.image_sha256,
+ algorithm_image__image_sha256=algorithm.active_image.image_sha256,
phase=self._phase,
+ **extra_submission_filter,
).exists():
raise ValidationError(
- "A submission for this algorithm container image "
+ "A submission for this algorithm container image and model "
"for this phase already exists."
)
if (
Evaluation.objects.filter(
- submission__algorithm_image__image_sha256=algorithm_image.image_sha256
+ submission__algorithm_image__image_sha256=algorithm.active_image.image_sha256,
+ **extra_evaluation_filter,
)
.exclude(
status__in=[
@@ -423,7 +477,10 @@ def clean_algorithm_image(self):
"complete."
)
- return algorithm_image
+ self.cleaned_data["algorithm_image"] = algorithm.active_image
+ self.cleaned_data["algorithm_model"] = algorithm.active_model
+
+ return algorithm
def clean_creator(self):
creator = self.cleaned_data["creator"]
diff --git a/app/grandchallenge/evaluation/migrations/0054_submission_algorithm_model.py b/app/grandchallenge/evaluation/migrations/0054_submission_algorithm_model.py
new file mode 100644
index 0000000000..d990f9e814
--- /dev/null
+++ b/app/grandchallenge/evaluation/migrations/0054_submission_algorithm_model.py
@@ -0,0 +1,25 @@
+# Generated by Django 4.2.13 on 2024-05-31 14:20
+
+import django.db.models.deletion
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("algorithms", "0049_algorithmmodel_job_algorithm_model_and_more"),
+ ("evaluation", "0053_alter_phase_extra_results_columns"),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name="submission",
+ name="algorithm_model",
+ field=models.ForeignKey(
+ blank=True,
+ null=True,
+ on_delete=django.db.models.deletion.SET_NULL,
+ to="algorithms.algorithmmodel",
+ ),
+ ),
+ ]
diff --git a/app/grandchallenge/evaluation/models.py b/app/grandchallenge/evaluation/models.py
index 60e035a281..f3d07e2928 100644
--- a/app/grandchallenge/evaluation/models.py
+++ b/app/grandchallenge/evaluation/models.py
@@ -22,7 +22,7 @@
from guardian.models import GroupObjectPermissionBase, UserObjectPermissionBase
from guardian.shortcuts import assign_perm, remove_perm
-from grandchallenge.algorithms.models import AlgorithmImage
+from grandchallenge.algorithms.models import AlgorithmImage, AlgorithmModel
from grandchallenge.archives.models import Archive, ArchiveItem
from grandchallenge.challenges.models import Challenge
from grandchallenge.components.models import (
@@ -1064,6 +1064,9 @@ class Submission(UUIDModel):
algorithm_image = models.ForeignKey(
AlgorithmImage, null=True, on_delete=models.SET_NULL
)
+ algorithm_model = models.ForeignKey(
+ AlgorithmModel, null=True, blank=True, on_delete=models.SET_NULL
+ )
user_upload = models.ForeignKey(
UserUpload, blank=True, null=True, on_delete=models.SET_NULL
)
diff --git a/app/grandchallenge/evaluation/tasks.py b/app/grandchallenge/evaluation/tasks.py
index 8d965683fe..32b4fcce6d 100644
--- a/app/grandchallenge/evaluation/tasks.py
+++ b/app/grandchallenge/evaluation/tasks.py
@@ -223,6 +223,7 @@ def retry_with_delay():
):
jobs = create_algorithm_jobs(
algorithm_image=evaluation.submission.algorithm_image,
+ algorithm_model=evaluation.submission.algorithm_model,
civ_sets=[
{*ai.values.all()}
for ai in evaluation.submission.phase.archive.items.prefetch_related(
diff --git a/app/grandchallenge/evaluation/templates/evaluation/evaluation_detail.html b/app/grandchallenge/evaluation/templates/evaluation/evaluation_detail.html
index 3ed222733f..936021e871 100644
--- a/app/grandchallenge/evaluation/templates/evaluation/evaluation_detail.html
+++ b/app/grandchallenge/evaluation/templates/evaluation/evaluation_detail.html
@@ -74,7 +74,11 @@ Evaluation
{{ object.submission.algorithm_image.algorithm.title }}
- (Version {{ object.submission.algorithm_image.pk }})
+ (Image Version {{ object.submission.algorithm_image.pk }}
+ {% if object.submission.algorithm_model %}
+                    Model Version {{ object.submission.algorithm_model.pk }}
+ {% endif %}
+ )
{% endif %}
diff --git a/app/tests/algorithms_tests/factories.py b/app/tests/algorithms_tests/factories.py
index 95f64081af..8f93d0f577 100644
--- a/app/tests/algorithms_tests/factories.py
+++ b/app/tests/algorithms_tests/factories.py
@@ -3,6 +3,7 @@
from grandchallenge.algorithms.models import (
Algorithm,
AlgorithmImage,
+ AlgorithmModel,
AlgorithmPermissionRequest,
Job,
)
@@ -34,6 +35,16 @@ class Meta:
image_sha256 = factory.sequence(lambda n: hash_sha256(f"image{n}"))
+class AlgorithmModelFactory(factory.django.DjangoModelFactory):
+ class Meta:
+ model = AlgorithmModel
+
+ algorithm = factory.SubFactory(AlgorithmFactory)
+ creator = factory.SubFactory(UserFactory)
+ model = factory.django.FileField()
+ sha256 = factory.sequence(lambda n: hash_sha256(f"image{n}"))
+
+
class AlgorithmJobFactory(factory.django.DjangoModelFactory):
class Meta:
model = Job
diff --git a/app/tests/algorithms_tests/resources/model.tar.gz b/app/tests/algorithms_tests/resources/model.tar.gz
new file mode 100644
index 0000000000..2b4f763b3c
Binary files /dev/null and b/app/tests/algorithms_tests/resources/model.tar.gz differ
diff --git a/app/tests/algorithms_tests/test_forms.py b/app/tests/algorithms_tests/test_forms.py
index 6a8c104154..1d1184367e 100644
--- a/app/tests/algorithms_tests/test_forms.py
+++ b/app/tests/algorithms_tests/test_forms.py
@@ -1,8 +1,11 @@
+from pathlib import Path
+
import pytest
from actstream.actions import is_following
from grandchallenge.algorithms.forms import (
AlgorithmForm,
+ AlgorithmModelForm,
AlgorithmPublishForm,
ImageActivateForm,
JobCreateForm,
@@ -21,15 +24,21 @@
from grandchallenge.core.utils.access_requests import (
AccessRequestHandlingOptions,
)
+from grandchallenge.verifications.models import Verification
from tests.algorithms_tests.factories import (
AlgorithmFactory,
AlgorithmImageFactory,
AlgorithmJobFactory,
+ AlgorithmModelFactory,
AlgorithmPermissionRequestFactory,
)
from tests.algorithms_tests.utils import get_algorithm_creator
from tests.components_tests.factories import ComponentInterfaceFactory
from tests.factories import UserFactory, WorkstationFactory
+from tests.uploads_tests.factories import (
+ UserUploadFactory,
+ create_upload_from_file,
+)
from tests.utils import get_view_for_user
from tests.verification_tests.factories import VerificationFactory
@@ -521,3 +530,39 @@ def test_image_activate_form():
assert "Image updating already in progress." in str(
form.errors["algorithm_image"]
)
+
+
+@pytest.mark.django_db
+def test_algorithm_model_form():
+ user = UserFactory()
+ alg = AlgorithmFactory()
+ user_upload = UserUploadFactory(creator=user)
+ user_upload.status = user_upload.StatusChoices.COMPLETED
+ user_upload.save()
+
+ form = AlgorithmModelForm(
+ user=user,
+ algorithm=alg,
+ data={"user_upload": user_upload, "creator": user, "algorithm": alg},
+ )
+ assert not form.is_valid()
+ assert "This upload is not a valid .tar.gz file" in str(form.errors)
+ assert "Select a valid choice" in str(form.errors["creator"])
+
+ Verification.objects.create(user=user, is_verified=True)
+ upload = create_upload_from_file(
+ creator=user,
+ file_path=Path(__file__).parent / "resources" / "model.tar.gz",
+ )
+ AlgorithmModelFactory(creator=user)
+
+ form2 = AlgorithmModelForm(
+ user=user,
+ algorithm=alg,
+ data={"user_upload": upload, "creator": user, "algorithm": alg},
+ )
+ assert not form2.is_valid()
+ assert (
+ "You have an existing model importing, please wait for it to complete"
+ in str(form2.errors)
+ )
diff --git a/app/tests/algorithms_tests/test_tasks.py b/app/tests/algorithms_tests/test_tasks.py
index 58aa09b8eb..a42d923586 100644
--- a/app/tests/algorithms_tests/test_tasks.py
+++ b/app/tests/algorithms_tests/test_tasks.py
@@ -11,6 +11,7 @@
from grandchallenge.algorithms.models import Job
from grandchallenge.algorithms.tasks import (
+ assign_algorithm_model_from_upload,
create_algorithm_jobs,
execute_algorithm_job_for_inputs,
filter_civs_for_algorithm,
@@ -21,6 +22,7 @@
from grandchallenge.components.models import (
ComponentInterface,
ComponentInterfaceValue,
+ ImportStatusChoices,
InterfaceKindChoices,
)
from grandchallenge.components.tasks import (
@@ -28,8 +30,10 @@
)
from grandchallenge.notifications.models import Notification
from tests.algorithms_tests.factories import (
+ AlgorithmFactory,
AlgorithmImageFactory,
AlgorithmJobFactory,
+ AlgorithmModelFactory,
)
from tests.cases_tests.factories import RawImageUploadSessionFactory
from tests.components_tests.factories import (
@@ -42,7 +46,10 @@
ImageFileFactory,
UserFactory,
)
-from tests.uploads_tests.factories import UserUploadFactory
+from tests.uploads_tests.factories import (
+ UserUploadFactory,
+ create_upload_from_file,
+)
from tests.utils import get_view_for_user, recurse_callbacks
@@ -762,3 +769,46 @@ def test_setting_credits_per_job(
alg.refresh_from_db()
assert alg.credits_per_job == test["credits"]
+
+
+@pytest.mark.django_db()
+def test_assign_algorithm_model_from_upload(settings):
+ # Override the celery settings
+ settings.task_eager_propagates = (True,)
+ settings.task_always_eager = (True,)
+
+ user = UserFactory()
+ alg = AlgorithmFactory()
+ upload = create_upload_from_file(
+ creator=user,
+ file_path=Path(__file__).parent / "resources" / "model.tar.gz",
+ )
+ model = AlgorithmModelFactory(
+ algorithm=alg, creator=user, user_upload=upload
+ )
+ assert model.is_desired_version is False
+
+ assign_algorithm_model_from_upload(
+ algorithm_model_pk=model.pk,
+ )
+ model.refresh_from_db()
+ assert model.is_desired_version
+ assert model.import_status == ImportStatusChoices.COMPLETED
+
+ upload2 = create_upload_from_file(
+ creator=user,
+ file_path=Path(__file__).parent / "resources" / "model.tar.gz",
+ )
+ model2 = AlgorithmModelFactory(
+ algorithm=alg, creator=user, user_upload=upload2
+ )
+ assign_algorithm_model_from_upload(
+ algorithm_model_pk=model2.pk,
+ )
+ model2.refresh_from_db()
+ assert not model2.is_desired_version
+ assert model2.import_status == ImportStatusChoices.FAILED
+ assert model2.status == "Algorithm model with this sha256 already exists."
+ assert not model2.user_upload
+ with pytest.raises(ValueError):
+ model2.model.file
diff --git a/app/tests/algorithms_tests/test_views.py b/app/tests/algorithms_tests/test_views.py
index eae8492793..a4423089be 100644
--- a/app/tests/algorithms_tests/test_views.py
+++ b/app/tests/algorithms_tests/test_views.py
@@ -22,6 +22,7 @@
AlgorithmFactory,
AlgorithmImageFactory,
AlgorithmJobFactory,
+ AlgorithmModelFactory,
AlgorithmPermissionRequestFactory,
)
from tests.cases_tests import RESOURCE_PATH
@@ -326,6 +327,7 @@ def test_algorithm_jobs_list_view(client):
class TestObjectPermissionRequiredViews:
def test_permission_required_views(self, client):
ai = AlgorithmImageFactory(is_manifest_valid=True, is_in_registry=True)
+ am = AlgorithmModelFactory()
u = UserFactory()
j = AlgorithmJobFactory(algorithm_image=ai, status=Job.SUCCESS)
p = AlgorithmPermissionRequestFactory(algorithm=ai.algorithm)
@@ -442,6 +444,20 @@ def test_permission_required_views(self, client):
ai.algorithm,
None,
),
+ (
+ "model-create",
+ {"slug": am.algorithm.slug},
+ "change_algorithm",
+ am.algorithm,
+ None,
+ ),
+ (
+ "model-detail",
+ {"slug": am.algorithm.slug, "pk": am.pk},
+ "view_algorithmmodel",
+ am,
+ None,
+ ),
]:
def _get_view():
@@ -1242,3 +1258,45 @@ def test_evaluations_are_filtered(client):
)
assert [*response.context["best_evaluation_per_phase"]] == [e, e2]
+
+
+@pytest.mark.django_db
+def test_job_create_denied_for_same_input_model_and_image(client):
+ creator = UserFactory()
+ VerificationFactory(user=creator, is_verified=True)
+ alg = AlgorithmFactory()
+ alg.add_editor(user=creator)
+ ci = ComponentInterfaceFactory(
+ kind=InterfaceKind.InterfaceKindChoices.IMAGE
+ )
+ alg.inputs.set([ci])
+ ai = AlgorithmImageFactory(
+ algorithm=alg,
+ is_manifest_valid=True,
+ is_in_registry=True,
+ is_desired_version=True,
+ )
+ am = AlgorithmModelFactory(algorithm=alg, is_desired_version=True)
+ im = ImageFactory()
+ assign_perm("view_image", creator, im)
+ civ = ComponentInterfaceValueFactory(interface=ci, image=im)
+ j = AlgorithmJobFactory(algorithm_image=ai, algorithm_model=am)
+ j.inputs.set([civ])
+ response = get_view_for_user(
+ viewname="algorithms:job-create",
+ client=client,
+ method=client.post,
+ reverse_kwargs={
+ "slug": alg.slug,
+ },
+ user=creator,
+ data={
+ ci.slug: im.pk,
+ f"WidgetChoice-{ci.slug}": WidgetChoices.IMAGE_SEARCH.name,
+ },
+ )
+ assert not response.context["form"].is_valid()
+ assert (
+ "A result for these inputs with the current image and model already exists."
+ in str(response.context["form"].errors)
+ )
diff --git a/app/tests/components_tests/test_amazon_sagemaker_training_backend.py b/app/tests/components_tests/test_amazon_sagemaker_training_backend.py
index 6c69b36850..c6de90eb88 100644
--- a/app/tests/components_tests/test_amazon_sagemaker_training_backend.py
+++ b/app/tests/components_tests/test_amazon_sagemaker_training_backend.py
@@ -160,6 +160,7 @@ def test_execute(settings):
executor = AmazonSageMakerTrainingExecutor(
job_id=f"algorithms-job-{pk}",
exec_image_repo_tag="",
+ algorithm_model=None,
memory_limit=4,
time_limit=60,
requires_gpu=False,
@@ -196,6 +197,7 @@ def test_execute(settings):
"PYTHONUNBUFFERED": "1",
"no_proxy": "amazonaws.com",
"GRAND_CHALLENGE_COMPONENT_WRITABLE_DIRECTORIES": "/opt/ml/output/data:/opt/ml/model:/opt/ml/checkpoints:/tmp",
+ "GRAND_CHALLENGE_COMPONENT_POST_CLEAN_DIRECTORIES": "/opt/ml/output/data:/opt/ml/model",
},
"VpcConfig": {
"SecurityGroupIds": [
diff --git a/app/tests/evaluation_tests/test_forms.py b/app/tests/evaluation_tests/test_forms.py
index bb8303ebc5..b94c16b8ae 100644
--- a/app/tests/evaluation_tests/test_forms.py
+++ b/app/tests/evaluation_tests/test_forms.py
@@ -2,7 +2,7 @@
from factory.django import ImageField
from grandchallenge.algorithms.forms import AlgorithmForPhaseForm
-from grandchallenge.algorithms.models import AlgorithmImage, Job
+from grandchallenge.algorithms.models import Job
from grandchallenge.evaluation.forms import (
ConfigureAlgorithmPhasesForm,
SubmissionForm,
@@ -15,6 +15,7 @@
AlgorithmFactory,
AlgorithmImageFactory,
AlgorithmJobFactory,
+ AlgorithmModelFactory,
)
from tests.archives_tests.factories import ArchiveFactory, ArchiveItemFactory
from tests.components_tests.factories import (
@@ -50,7 +51,7 @@ def test_setting_predictions_file(self):
assert "algorithm_image" not in form.fields
assert "user_upload" in form.fields
- def test_setting_algorithm_image(self):
+ def test_setting_algorithm(self):
form = SubmissionForm(
user=UserFactory(),
phase=PhaseFactory(
@@ -59,26 +60,31 @@ def test_setting_algorithm_image(self):
)
assert "algorithm_image" in form.fields
+ assert "algorithm" in form.fields
+ assert "algorithm_model" in form.fields
assert "user_upload" not in form.fields
- def test_algorithm_image_queryset(self):
+ def test_algorithm_queryset(self):
editor = UserFactory()
- alg1, alg2, alg3 = AlgorithmFactory.create_batch(3)
+ alg1, alg2, alg3, alg4 = AlgorithmFactory.create_batch(4)
alg1.add_editor(editor)
alg2.add_editor(editor)
+ alg4.add_editor(editor)
ci1, ci2, ci3, ci4 = ComponentInterfaceFactory.create_batch(4)
alg1.inputs.set([ci1, ci2])
alg1.outputs.set([ci3, ci4])
alg3.inputs.set([ci1, ci2])
alg3.outputs.set([ci3, ci4])
+ alg4.inputs.set([ci1, ci2])
+ alg4.outputs.set([ci3, ci4])
for alg in [alg1, alg2, alg3]:
- AlgorithmImageFactory(algorithm=alg)
AlgorithmImageFactory(
algorithm=alg,
is_in_registry=True,
is_desired_version=True,
is_manifest_valid=True,
)
+ AlgorithmImageFactory(algorithm=alg4)
p = PhaseFactory(submission_kind=SubmissionKindChoices.ALGORITHM)
p.algorithm_inputs.set([ci1, ci2])
p.algorithm_outputs.set([ci3, ci4])
@@ -87,32 +93,38 @@ def test_algorithm_image_queryset(self):
phase=p,
)
- assert alg1.active_image in form.fields["algorithm_image"].queryset
- assert alg2.active_image not in form.fields["algorithm_image"].queryset
- assert alg3.active_image not in form.fields["algorithm_image"].queryset
- for im in AlgorithmImage.objects.exclude(
- pk__in=[
- alg1.active_image.pk,
- alg2.active_image.pk,
- alg3.active_image.pk,
- ]
- ).all():
- assert im not in form.fields["algorithm_image"].queryset
+ assert alg1 in form.fields["algorithm"].queryset
+ assert alg2 not in form.fields["algorithm"].queryset
+        assert alg3 not in form.fields["algorithm"].queryset
+ assert alg4 not in form.fields["algorithm"].queryset
- def test_algorithm_image_queryset_if_parent_phase_exists(self):
+ def test_algorithm_queryset_if_parent_phase_exists(self):
editor = UserFactory()
- alg = AlgorithmFactory()
- ci1, ci2, ci3, ci4 = ComponentInterfaceFactory.create_batch(4)
- alg.add_editor(editor)
- alg.inputs.set([ci1, ci2])
- alg.outputs.set([ci3, ci4])
- ai1, ai2, ai3, ai4 = AlgorithmImageFactory.create_batch(
- 4,
- algorithm=alg,
- is_in_registry=True,
- is_desired_version=True,
- is_manifest_valid=True,
+ alg1, alg2, alg3, alg4, alg5, alg6, alg7, alg8, alg9 = (
+ AlgorithmFactory.create_batch(9)
)
+ ci1, ci2, ci3, ci4 = ComponentInterfaceFactory.create_batch(4)
+ for alg in [alg1, alg2, alg3, alg4, alg5, alg6, alg7, alg8, alg9]:
+ alg.add_editor(editor)
+ alg.inputs.set([ci1, ci2])
+ alg.outputs.set([ci3, ci4])
+ AlgorithmImageFactory(
+ algorithm=alg,
+ is_in_registry=True,
+ is_desired_version=True,
+ is_manifest_valid=True,
+ )
+ for alg in [alg1, alg2, alg8, alg9]:
+ AlgorithmModelFactory(algorithm=alg, is_desired_version=True)
+ ai_inactive = AlgorithmImageFactory(
+ algorithm=alg6,
+ )
+ for alg in [alg1, alg2, alg3, alg4, alg5, alg6]:
+ AlgorithmJobFactory(
+ algorithm_image=alg.active_image,
+ algorithm_model=alg.active_model,
+ status=Job.SUCCESS,
+ )
p_parent, p_child = PhaseFactory.create_batch(
2,
@@ -125,48 +137,105 @@ def test_algorithm_image_queryset_if_parent_phase_exists(self):
p_child.parent = p_parent
p_child.save()
+ # successful eval to parent phase with active image and model
EvaluationFactory(
submission__phase=p_parent,
- submission__algorithm_image=ai1,
+ submission__algorithm_image=alg1.active_image,
+ submission__algorithm_model=alg1.active_model,
status=Evaluation.SUCCESS,
)
+ # successful eval to parent phase with active image, but not active model
+ EvaluationFactory(
+ submission__phase=p_parent,
+ submission__algorithm_image=alg2.active_image,
+ status=Evaluation.SUCCESS,
+ )
+ # successful eval to other phase with active image
EvaluationFactory(
submission__phase=PhaseFactory(),
- submission__algorithm_image=ai2,
+ submission__algorithm_image=alg3.active_image,
status=Evaluation.SUCCESS,
)
+ # failed eval to parent phase with active image
EvaluationFactory(
submission__phase=p_parent,
- submission__algorithm_image=ai3,
+ submission__algorithm_image=alg4.active_image,
status=Evaluation.FAILURE,
)
+ # successful eval to parent phase with active image, but no successful job
EvaluationFactory(
submission__phase=p_parent,
- submission__algorithm_image=ai4,
+ submission__algorithm_image=alg5.active_image,
status=Evaluation.SUCCESS,
)
- AlgorithmJobFactory(algorithm_image=ai1, status=Job.SUCCESS)
+        # successful eval to parent phase with inactive image
+ EvaluationFactory(
+ submission__phase=p_parent,
+ submission__algorithm_image=ai_inactive,
+ status=Evaluation.SUCCESS,
+ )
+ # successful eval to parent phase with active image but failed job
+ EvaluationFactory(
+ submission__phase=p_parent,
+ submission__algorithm_image=alg7.active_image,
+ submission__algorithm_model=alg7.active_model,
+ status=Evaluation.SUCCESS,
+ )
+ AlgorithmJobFactory(
+            algorithm_image=alg7.active_image,
+            algorithm_model=alg7.active_model,
+ status=Job.FAILURE,
+ )
+ # successful eval to parent phase with active image but successful job with different image
+ EvaluationFactory(
+ submission__phase=p_parent,
+ submission__algorithm_image=alg8.active_image,
+ submission__algorithm_model=alg8.active_model,
+ status=Evaluation.SUCCESS,
+ )
+ AlgorithmJobFactory(
+ algorithm_image=AlgorithmImageFactory(algorithm=alg8),
+            algorithm_model=alg8.active_model,
+ status=Job.SUCCESS,
+ )
+ # successful eval to parent phase with active image but successful job with different model
+ EvaluationFactory(
+ submission__phase=p_parent,
+ submission__algorithm_image=alg9.active_image,
+ submission__algorithm_model=alg9.active_model,
+ status=Evaluation.SUCCESS,
+ )
+ AlgorithmJobFactory(
+ algorithm_image=alg9.active_image,
+ algorithm_model=AlgorithmModelFactory(algorithm=alg9),
+ status=Job.SUCCESS,
+ )
form = SubmissionForm(
user=editor,
phase=p,
)
- assert ai1 in form.fields["algorithm_image"].queryset
- assert ai2 not in form.fields["algorithm_image"].queryset
- assert ai3 not in form.fields["algorithm_image"].queryset
- assert ai4 not in form.fields["algorithm_image"].queryset
+ assert alg1 in form.fields["algorithm"].queryset
+ assert alg2 not in form.fields["algorithm"].queryset
+ assert alg3 not in form.fields["algorithm"].queryset
+ assert alg4 not in form.fields["algorithm"].queryset
+ assert alg5 not in form.fields["algorithm"].queryset
+ assert alg6 not in form.fields["algorithm"].queryset
+ assert alg7 not in form.fields["algorithm"].queryset
+ assert alg8 not in form.fields["algorithm"].queryset
+ assert alg9 not in form.fields["algorithm"].queryset
- def test_no_algorithm_image_selection(self):
+ def test_no_algorithm_selection(self):
form = SubmissionForm(
user=UserFactory(),
phase=PhaseFactory(
submission_kind=SubmissionKindChoices.ALGORITHM
),
- data={"algorithm_image": ""},
+ data={"algorithm": ""},
)
- assert form.errors["algorithm_image"] == ["This field is required."]
+ assert form.errors["algorithm"] == ["This field is required."]
def test_algorithm_no_permission(self):
form = SubmissionForm(
@@ -174,10 +243,10 @@ def test_algorithm_no_permission(self):
phase=PhaseFactory(
submission_kind=SubmissionKindChoices.ALGORITHM
),
- data={"algorithm_image": AlgorithmImageFactory()},
+ data={"algorithm": AlgorithmFactory()},
)
- assert form.errors["algorithm_image"] == [
+ assert form.errors["algorithm"] == [
"Select a valid choice. That choice is not one of the available choices."
]
@@ -222,12 +291,61 @@ def test_algorithm_with_permission(self):
form = SubmissionForm(
user=user,
phase=p,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p},
+ data={"algorithm": alg, "creator": user, "phase": p},
)
assert form.errors == {}
- assert "algorithm_image" not in form.errors
+ assert "algorithm" not in form.errors
+ assert form.is_valid()
+
+ def test_algorithm_image_and_model_set(self):
+ user = UserFactory()
+ alg = AlgorithmFactory()
+ alg.add_editor(user=user)
+ ci1 = ComponentInterfaceFactory()
+ ci2 = ComponentInterfaceFactory()
+ alg.inputs.set([ci1])
+ alg.outputs.set([ci2])
+ archive = ArchiveFactory()
+ p = PhaseFactory(
+ submission_kind=SubmissionKindChoices.ALGORITHM,
+ submissions_limit_per_user_per_period=10,
+ archive=archive,
+ )
+ p.algorithm_inputs.set([ci1])
+ p.algorithm_outputs.set([ci2])
+ civ = ComponentInterfaceValueFactory(interface=ci1)
+ i = ArchiveItemFactory(archive=p.archive)
+ i.values.add(civ)
+
+ InvoiceFactory(
+ challenge=p.challenge,
+ compute_costs_euros=10,
+ payment_status=PaymentStatusChoices.COMPLIMENTARY,
+ )
+
+ # Fetch from the db to get the cost annotations
+ # Maybe this is solved with GeneratedField (Django 5)?
+ p = Phase.objects.get(pk=p.pk)
+
+ ai = AlgorithmImageFactory(
+ is_manifest_valid=True,
+ is_in_registry=True,
+ is_desired_version=True,
+ algorithm=alg,
+ )
+ am = AlgorithmModelFactory(algorithm=alg, is_desired_version=True)
+ AlgorithmJobFactory(algorithm_image=ai, status=Job.SUCCESS)
+
+ form = SubmissionForm(
+ user=user,
+ phase=p,
+ data={"algorithm": alg, "creator": user, "phase": p},
+ )
+
assert form.is_valid()
+ assert ai == form.cleaned_data["algorithm_image"]
+ assert am == form.cleaned_data["algorithm_model"]
def test_user_no_verification(self):
user = UserFactory()
@@ -324,7 +442,7 @@ def test_no_valid_archive_items(self):
form2 = SubmissionForm(
user=user,
phase=p_alg,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p_alg},
+ data={"algorithm": alg, "creator": user, "phase": p_alg},
)
assert (
@@ -340,7 +458,7 @@ def test_no_valid_archive_items(self):
form3 = SubmissionForm(
user=user,
phase=p_alg,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p_alg},
+ data={"algorithm": alg, "creator": user, "phase": p_alg},
)
assert form3.is_valid()
@@ -389,13 +507,13 @@ def test_submission_or_eval_exists_for_image(self):
form = SubmissionForm(
user=user,
phase=p,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p},
+ data={"algorithm": alg, "creator": user, "phase": p},
)
assert not form.is_valid()
assert (
- "A submission for this algorithm container image for this phase already exists."
- in form.errors["algorithm_image"]
+ "A submission for this algorithm container image and model for this phase already exists."
+ in form.errors["algorithm"]
)
Submission.objects.all().delete()
@@ -405,13 +523,13 @@ def test_submission_or_eval_exists_for_image(self):
form = SubmissionForm(
user=user,
phase=p,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p},
+ data={"algorithm": alg, "creator": user, "phase": p},
)
assert not form.is_valid()
assert (
"An evaluation for this algorithm is already in progress for another phase. Please wait for the other evaluation to complete."
- in form.errors["algorithm_image"]
+ in form.errors["algorithm"]
)