diff --git a/app/grandchallenge/algorithms/models.py b/app/grandchallenge/algorithms/models.py
index b02cbadf26..747d234668 100644
--- a/app/grandchallenge/algorithms/models.py
+++ b/app/grandchallenge/algorithms/models.py
@@ -502,6 +502,16 @@ def public_test_case(self):
except AttributeError:
return False
+ def form_field_label(self):
+ title = str(self.title)
+ if self.active_image:
+ title += f" (Active image: {self.active_image.pk})"
+ if self.active_model:
+ title += f" (Active model: {self.active_model.pk})"
+ else:
+ title += " (Active model: None)"
+ return title
+
class AlgorithmUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(Algorithm, on_delete=models.CASCADE)
diff --git a/app/grandchallenge/algorithms/tasks.py b/app/grandchallenge/algorithms/tasks.py
index 7110af45c3..b77dd86afc 100644
--- a/app/grandchallenge/algorithms/tasks.py
+++ b/app/grandchallenge/algorithms/tasks.py
@@ -226,8 +226,8 @@ def retry_with_delay():
def create_algorithm_jobs(
*,
algorithm_image,
- algorithm_model,
civ_sets,
+ algorithm_model=None,
extra_viewer_groups=None,
extra_logs_viewer_groups=None,
max_jobs=None,
diff --git a/app/grandchallenge/evaluation/forms.py b/app/grandchallenge/evaluation/forms.py
index 52aff0bbf2..29fe302c37 100644
--- a/app/grandchallenge/evaluation/forms.py
+++ b/app/grandchallenge/evaluation/forms.py
@@ -264,6 +264,11 @@ class Meta:
)
+class AlgorithmChoiceField(ModelChoiceField):
+ def label_from_instance(self, obj):
+ return obj.form_field_label()
+
+
submission_fields = (
"creator",
"phase",
@@ -294,7 +299,7 @@ class SubmissionForm(
label="Predictions File",
queryset=None,
)
- algorithm = ModelChoiceField(queryset=None)
+ algorithm = AlgorithmChoiceField(queryset=None)
def __init__(self, *args, user, phase: Phase, **kwargs): # noqa: C901
super().__init__(*args, user=user, phase=phase, **kwargs)
diff --git a/app/grandchallenge/evaluation/templates/evaluation/evaluation_detail.html b/app/grandchallenge/evaluation/templates/evaluation/evaluation_detail.html
index 3ed222733f..936021e871 100644
--- a/app/grandchallenge/evaluation/templates/evaluation/evaluation_detail.html
+++ b/app/grandchallenge/evaluation/templates/evaluation/evaluation_detail.html
@@ -74,7 +74,11 @@
Evaluation
{{ object.submission.algorithm_image.algorithm.title }}
- (Version {{ object.submission.algorithm_image.pk }})
+ (Image Version {{ object.submission.algorithm_image.pk }}
+ {% if object.submission.algorithm_model %}
+ Model Version {{ object.submission.algorithm_model.pk }}
+ {% endif %}
+ )
{% endif %}
diff --git a/app/tests/algorithms_tests/test_permissions.py b/app/tests/algorithms_tests/test_permissions.py
index 27588ea290..3e9dd8d931 100644
--- a/app/tests/algorithms_tests/test_permissions.py
+++ b/app/tests/algorithms_tests/test_permissions.py
@@ -15,12 +15,10 @@
from grandchallenge.evaluation.tasks import (
create_algorithm_jobs_for_evaluation,
)
-from grandchallenge.verifications.models import Verification
from tests.algorithms_tests.factories import (
AlgorithmFactory,
AlgorithmImageFactory,
AlgorithmJobFactory,
- AlgorithmModelFactory,
)
from tests.algorithms_tests.utils import TwoAlgorithms
from tests.archives_tests.factories import ArchiveFactory, ArchiveItemFactory
@@ -536,53 +534,3 @@ def test_job_permissions_for_debug_phase(
)
# No-one should be in the viewers group
assert {*job.viewers.user_set.all()} == set()
-
-
-@pytest.mark.django_db
-def test_algorithm_model_view_permissions(client):
- user, editor = UserFactory.create_batch(2)
- alg = AlgorithmFactory()
- alg.add_editor(editor)
-
- am = AlgorithmModelFactory(algorithm=alg)
-
- response = get_view_for_user(
- viewname="algorithms:model-create",
- client=client,
- reverse_kwargs={"slug": alg.slug},
- user=user,
- )
- assert response.status_code == 403
-
- response = get_view_for_user(
- viewname="algorithms:model-detail",
- client=client,
- reverse_kwargs={"slug": alg.slug, "pk": am.pk},
- user=user,
- )
- assert response.status_code == 403
-
- response = get_view_for_user(
- viewname="algorithms:model-create",
- client=client,
- reverse_kwargs={"slug": alg.slug},
- user=editor,
- )
- assert response.status_code == 403
-
- Verification.objects.create(user=editor, is_verified=True)
- response = get_view_for_user(
- viewname="algorithms:model-create",
- client=client,
- reverse_kwargs={"slug": alg.slug},
- user=editor,
- )
- assert response.status_code == 200
-
- response = get_view_for_user(
- viewname="algorithms:model-detail",
- client=client,
- reverse_kwargs={"slug": alg.slug, "pk": am.pk},
- user=editor,
- )
- assert response.status_code == 200
diff --git a/app/tests/algorithms_tests/test_views.py b/app/tests/algorithms_tests/test_views.py
index eae8492793..a4423089be 100644
--- a/app/tests/algorithms_tests/test_views.py
+++ b/app/tests/algorithms_tests/test_views.py
@@ -22,6 +22,7 @@
AlgorithmFactory,
AlgorithmImageFactory,
AlgorithmJobFactory,
+ AlgorithmModelFactory,
AlgorithmPermissionRequestFactory,
)
from tests.cases_tests import RESOURCE_PATH
@@ -326,6 +327,7 @@ def test_algorithm_jobs_list_view(client):
class TestObjectPermissionRequiredViews:
def test_permission_required_views(self, client):
ai = AlgorithmImageFactory(is_manifest_valid=True, is_in_registry=True)
+ am = AlgorithmModelFactory()
u = UserFactory()
j = AlgorithmJobFactory(algorithm_image=ai, status=Job.SUCCESS)
p = AlgorithmPermissionRequestFactory(algorithm=ai.algorithm)
@@ -442,6 +444,20 @@ def test_permission_required_views(self, client):
ai.algorithm,
None,
),
+ (
+ "model-create",
+ {"slug": am.algorithm.slug},
+ "change_algorithm",
+ am.algorithm,
+ None,
+ ),
+ (
+ "model-detail",
+ {"slug": am.algorithm.slug, "pk": am.pk},
+ "view_algorithmmodel",
+ am,
+ None,
+ ),
]:
def _get_view():
@@ -1242,3 +1258,45 @@ def test_evaluations_are_filtered(client):
)
assert [*response.context["best_evaluation_per_phase"]] == [e, e2]
+
+
+@pytest.mark.django_db
+def test_job_create_denied_for_same_input_model_and_image(client):
+ creator = UserFactory()
+ VerificationFactory(user=creator, is_verified=True)
+ alg = AlgorithmFactory()
+ alg.add_editor(user=creator)
+ ci = ComponentInterfaceFactory(
+ kind=InterfaceKind.InterfaceKindChoices.IMAGE
+ )
+ alg.inputs.set([ci])
+ ai = AlgorithmImageFactory(
+ algorithm=alg,
+ is_manifest_valid=True,
+ is_in_registry=True,
+ is_desired_version=True,
+ )
+ am = AlgorithmModelFactory(algorithm=alg, is_desired_version=True)
+ im = ImageFactory()
+ assign_perm("view_image", creator, im)
+ civ = ComponentInterfaceValueFactory(interface=ci, image=im)
+ j = AlgorithmJobFactory(algorithm_image=ai, algorithm_model=am)
+ j.inputs.set([civ])
+ response = get_view_for_user(
+ viewname="algorithms:job-create",
+ client=client,
+ method=client.post,
+ reverse_kwargs={
+ "slug": alg.slug,
+ },
+ user=creator,
+ data={
+ ci.slug: im.pk,
+ f"WidgetChoice-{ci.slug}": WidgetChoices.IMAGE_SEARCH.name,
+ },
+ )
+ assert not response.context["form"].is_valid()
+ assert (
+ "A result for these inputs with the current image and model already exists."
+ in str(response.context["form"].errors)
+ )
diff --git a/app/tests/components_tests/test_amazon_sagemaker_training_backend.py b/app/tests/components_tests/test_amazon_sagemaker_training_backend.py
index 6c69b36850..c6de90eb88 100644
--- a/app/tests/components_tests/test_amazon_sagemaker_training_backend.py
+++ b/app/tests/components_tests/test_amazon_sagemaker_training_backend.py
@@ -160,6 +160,7 @@ def test_execute(settings):
executor = AmazonSageMakerTrainingExecutor(
job_id=f"algorithms-job-{pk}",
exec_image_repo_tag="",
+ algorithm_model=None,
memory_limit=4,
time_limit=60,
requires_gpu=False,
@@ -196,6 +197,7 @@ def test_execute(settings):
"PYTHONUNBUFFERED": "1",
"no_proxy": "amazonaws.com",
"GRAND_CHALLENGE_COMPONENT_WRITABLE_DIRECTORIES": "/opt/ml/output/data:/opt/ml/model:/opt/ml/checkpoints:/tmp",
+ "GRAND_CHALLENGE_COMPONENT_POST_CLEAN_DIRECTORIES": "/opt/ml/output/data:/opt/ml/model",
},
"VpcConfig": {
"SecurityGroupIds": [
diff --git a/app/tests/evaluation_tests/test_forms.py b/app/tests/evaluation_tests/test_forms.py
index bb8303ebc5..356408e3bb 100644
--- a/app/tests/evaluation_tests/test_forms.py
+++ b/app/tests/evaluation_tests/test_forms.py
@@ -2,7 +2,7 @@
from factory.django import ImageField
from grandchallenge.algorithms.forms import AlgorithmForPhaseForm
-from grandchallenge.algorithms.models import AlgorithmImage, Job
+from grandchallenge.algorithms.models import Job
from grandchallenge.evaluation.forms import (
ConfigureAlgorithmPhasesForm,
SubmissionForm,
@@ -15,6 +15,7 @@
AlgorithmFactory,
AlgorithmImageFactory,
AlgorithmJobFactory,
+ AlgorithmModelFactory,
)
from tests.archives_tests.factories import ArchiveFactory, ArchiveItemFactory
from tests.components_tests.factories import (
@@ -50,7 +51,7 @@ def test_setting_predictions_file(self):
assert "algorithm_image" not in form.fields
assert "user_upload" in form.fields
- def test_setting_algorithm_image(self):
+ def test_setting_algorithm(self):
form = SubmissionForm(
user=UserFactory(),
phase=PhaseFactory(
@@ -59,26 +60,31 @@ def test_setting_algorithm_image(self):
)
assert "algorithm_image" in form.fields
+ assert "algorithm" in form.fields
+ assert "algorithm_model" in form.fields
assert "user_upload" not in form.fields
- def test_algorithm_image_queryset(self):
+ def test_algorithm_queryset(self):
editor = UserFactory()
- alg1, alg2, alg3 = AlgorithmFactory.create_batch(3)
+ alg1, alg2, alg3, alg4 = AlgorithmFactory.create_batch(4)
alg1.add_editor(editor)
alg2.add_editor(editor)
+ alg4.add_editor(editor)
ci1, ci2, ci3, ci4 = ComponentInterfaceFactory.create_batch(4)
alg1.inputs.set([ci1, ci2])
alg1.outputs.set([ci3, ci4])
alg3.inputs.set([ci1, ci2])
alg3.outputs.set([ci3, ci4])
+ alg4.inputs.set([ci1, ci2])
+ alg4.outputs.set([ci3, ci4])
for alg in [alg1, alg2, alg3]:
- AlgorithmImageFactory(algorithm=alg)
AlgorithmImageFactory(
algorithm=alg,
is_in_registry=True,
is_desired_version=True,
is_manifest_valid=True,
)
+ AlgorithmImageFactory(algorithm=alg4)
p = PhaseFactory(submission_kind=SubmissionKindChoices.ALGORITHM)
p.algorithm_inputs.set([ci1, ci2])
p.algorithm_outputs.set([ci3, ci4])
@@ -87,32 +93,25 @@ def test_algorithm_image_queryset(self):
phase=p,
)
- assert alg1.active_image in form.fields["algorithm_image"].queryset
- assert alg2.active_image not in form.fields["algorithm_image"].queryset
- assert alg3.active_image not in form.fields["algorithm_image"].queryset
- for im in AlgorithmImage.objects.exclude(
- pk__in=[
- alg1.active_image.pk,
- alg2.active_image.pk,
- alg3.active_image.pk,
- ]
- ).all():
- assert im not in form.fields["algorithm_image"].queryset
+ assert alg1 in form.fields["algorithm"].queryset
+ assert alg2 not in form.fields["algorithm"].queryset
+ assert alg3 not in form.fields["algorithm"].queryset
+ assert alg4 not in form.fields["algorithm"].queryset
- def test_algorithm_image_queryset_if_parent_phase_exists(self):
+ def test_algorithm_queryset_if_parent_phase_exists(self):
editor = UserFactory()
- alg = AlgorithmFactory()
+ alg1, alg2, alg3, alg4 = AlgorithmFactory.create_batch(4)
ci1, ci2, ci3, ci4 = ComponentInterfaceFactory.create_batch(4)
- alg.add_editor(editor)
- alg.inputs.set([ci1, ci2])
- alg.outputs.set([ci3, ci4])
- ai1, ai2, ai3, ai4 = AlgorithmImageFactory.create_batch(
- 4,
- algorithm=alg,
- is_in_registry=True,
- is_desired_version=True,
- is_manifest_valid=True,
- )
+ for alg in [alg1, alg2, alg3, alg4]:
+ alg.add_editor(editor)
+ alg.inputs.set([ci1, ci2])
+ alg.outputs.set([ci3, ci4])
+ AlgorithmImageFactory(
+ algorithm=alg,
+ is_in_registry=True,
+ is_desired_version=True,
+ is_manifest_valid=True,
+ )
p_parent, p_child = PhaseFactory.create_batch(
2,
@@ -127,46 +126,48 @@ def test_algorithm_image_queryset_if_parent_phase_exists(self):
EvaluationFactory(
submission__phase=p_parent,
- submission__algorithm_image=ai1,
+ submission__algorithm_image=alg1.active_image,
status=Evaluation.SUCCESS,
)
EvaluationFactory(
submission__phase=PhaseFactory(),
- submission__algorithm_image=ai2,
+ submission__algorithm_image=alg2.active_image,
status=Evaluation.SUCCESS,
)
EvaluationFactory(
submission__phase=p_parent,
- submission__algorithm_image=ai3,
+ submission__algorithm_image=alg3.active_image,
status=Evaluation.FAILURE,
)
EvaluationFactory(
submission__phase=p_parent,
- submission__algorithm_image=ai4,
+ submission__algorithm_image=alg4.active_image,
status=Evaluation.SUCCESS,
)
- AlgorithmJobFactory(algorithm_image=ai1, status=Job.SUCCESS)
+ AlgorithmJobFactory(
+ algorithm_image=alg1.active_image, status=Job.SUCCESS
+ )
form = SubmissionForm(
user=editor,
phase=p,
)
- assert ai1 in form.fields["algorithm_image"].queryset
- assert ai2 not in form.fields["algorithm_image"].queryset
- assert ai3 not in form.fields["algorithm_image"].queryset
- assert ai4 not in form.fields["algorithm_image"].queryset
+ assert alg1 in form.fields["algorithm"].queryset
+ assert alg2 not in form.fields["algorithm"].queryset
+ assert alg3 not in form.fields["algorithm"].queryset
+ assert alg4 not in form.fields["algorithm"].queryset
- def test_no_algorithm_image_selection(self):
+ def test_no_algorithm_selection(self):
form = SubmissionForm(
user=UserFactory(),
phase=PhaseFactory(
submission_kind=SubmissionKindChoices.ALGORITHM
),
- data={"algorithm_image": ""},
+ data={"algorithm": ""},
)
- assert form.errors["algorithm_image"] == ["This field is required."]
+ assert form.errors["algorithm"] == ["This field is required."]
def test_algorithm_no_permission(self):
form = SubmissionForm(
@@ -174,10 +175,10 @@ def test_algorithm_no_permission(self):
phase=PhaseFactory(
submission_kind=SubmissionKindChoices.ALGORITHM
),
- data={"algorithm_image": AlgorithmImageFactory()},
+ data={"algorithm": AlgorithmFactory()},
)
- assert form.errors["algorithm_image"] == [
+ assert form.errors["algorithm"] == [
"Select a valid choice. That choice is not one of the available choices."
]
@@ -222,12 +223,61 @@ def test_algorithm_with_permission(self):
form = SubmissionForm(
user=user,
phase=p,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p},
+ data={"algorithm": alg, "creator": user, "phase": p},
)
assert form.errors == {}
- assert "algorithm_image" not in form.errors
+ assert "algorithm" not in form.errors
+ assert form.is_valid()
+
+ def test_algorithm_image_and_model_set(self):
+ user = UserFactory()
+ alg = AlgorithmFactory()
+ alg.add_editor(user=user)
+ ci1 = ComponentInterfaceFactory()
+ ci2 = ComponentInterfaceFactory()
+ alg.inputs.set([ci1])
+ alg.outputs.set([ci2])
+ archive = ArchiveFactory()
+ p = PhaseFactory(
+ submission_kind=SubmissionKindChoices.ALGORITHM,
+ submissions_limit_per_user_per_period=10,
+ archive=archive,
+ )
+ p.algorithm_inputs.set([ci1])
+ p.algorithm_outputs.set([ci2])
+ civ = ComponentInterfaceValueFactory(interface=ci1)
+ i = ArchiveItemFactory(archive=p.archive)
+ i.values.add(civ)
+
+ InvoiceFactory(
+ challenge=p.challenge,
+ compute_costs_euros=10,
+ payment_status=PaymentStatusChoices.COMPLIMENTARY,
+ )
+
+ # Fetch from the db to get the cost annotations
+ # Maybe this is solved with GeneratedField (Django 5)?
+ p = Phase.objects.get(pk=p.pk)
+
+ ai = AlgorithmImageFactory(
+ is_manifest_valid=True,
+ is_in_registry=True,
+ is_desired_version=True,
+ algorithm=alg,
+ )
+ am = AlgorithmModelFactory(algorithm=alg, is_desired_version=True)
+ AlgorithmJobFactory(algorithm_image=ai, status=Job.SUCCESS)
+
+ form = SubmissionForm(
+ user=user,
+ phase=p,
+ data={"algorithm": alg, "creator": user, "phase": p},
+ )
+
assert form.is_valid()
+ assert ai == form.cleaned_data["algorithm_image"]
+ assert am == form.cleaned_data["algorithm_model"]
def test_user_no_verification(self):
user = UserFactory()
@@ -324,7 +374,7 @@ def test_no_valid_archive_items(self):
form2 = SubmissionForm(
user=user,
phase=p_alg,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p_alg},
+ data={"algorithm": alg, "creator": user, "phase": p_alg},
)
assert (
@@ -340,7 +390,7 @@ def test_no_valid_archive_items(self):
form3 = SubmissionForm(
user=user,
phase=p_alg,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p_alg},
+ data={"algorithm": alg, "creator": user, "phase": p_alg},
)
assert form3.is_valid()
@@ -389,13 +439,13 @@ def test_submission_or_eval_exists_for_image(self):
form = SubmissionForm(
user=user,
phase=p,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p},
+ data={"algorithm": alg, "creator": user, "phase": p},
)
assert not form.is_valid()
assert (
- "A submission for this algorithm container image for this phase already exists."
- in form.errors["algorithm_image"]
+ "A submission for this algorithm container image and model for this phase already exists."
+ in form.errors["algorithm"]
)
Submission.objects.all().delete()
@@ -405,13 +455,13 @@ def test_submission_or_eval_exists_for_image(self):
form = SubmissionForm(
user=user,
phase=p,
- data={"algorithm_image": ai.pk, "creator": user, "phase": p},
+ data={"algorithm": alg, "creator": user, "phase": p},
)
assert not form.is_valid()
assert (
"An evaluation for this algorithm is already in progress for another phase. Please wait for the other evaluation to complete."
- in form.errors["algorithm_image"]
+ in form.errors["algorithm"]
)