From 8dbe551e9e68da57c1239e4d5ee9d03c80ed7007 Mon Sep 17 00:00:00 2001
From: jmsmkn
Date: Mon, 22 Jan 2024 17:50:43 +0000
Subject: [PATCH] deploy: 6e6235672ba2d16a0dc96d03393dacfbaf39cf72

---
 .../grandchallenge/reader_studies/models.html | 43 ++++++++++++-------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/_modules/grandchallenge/reader_studies/models.html b/_modules/grandchallenge/reader_studies/models.html
index 3316f0e685..44e4ac3c09 100644
--- a/_modules/grandchallenge/reader_studies/models.html
+++ b/_modules/grandchallenge/reader_studies/models.html

@@ -638,13 +638,10 @@
             for key in gt.keys():
                 if key == "case" or key.endswith("__explanation"):
                     continue
-
                 question = self.questions.get(question_text=key)
                 _answer = json.loads(gt[key])
-
                 if _answer is None and question.required is False:
                     continue
-
                 if question.answer_type == Question.AnswerType.CHOICE:
                     try:
                         option = question.options.get(title=_answer)

@@ -653,14 +650,15 @@
                         raise ValidationError(
                             f"Option {_answer!r} is not valid for question {question.question_text}"
                         )
-
-                if question.answer_type == Question.AnswerType.MULTIPLE_CHOICE:
+                if question.answer_type in (
+                    Question.AnswerType.MULTIPLE_CHOICE,
+                    Question.AnswerType.MULTIPLE_CHOICE_DROPDOWN,
+                ):
                     _answer = list(
                         question.options.filter(title__in=_answer).values_list(
                             "pk", flat=True
                         )
                     )
-
                 kwargs = {
                     "creator": user,
                     "question": question,
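Taken together, the two hunks above adjust the ground-truth import: a CHOICE answer is looked up by its option title (an unknown title raises a ValidationError), while MULTIPLE_CHOICE and the new MULTIPLE_CHOICE_DROPDOWN answers are converted into lists of option pks. A minimal standalone sketch of that conversion; ``convert_ground_truth`` and the plain ``options`` dict (title to pk) are illustrative stand-ins for the model's queryset lookups, not part of the patch:

import json

# Illustrative only: `options` maps option title -> pk, standing in for the
# question.options queryset used in the hunks above.
def convert_ground_truth(raw_answer, options, multi):
    answer = json.loads(raw_answer)
    if multi:
        # Multiple choice (checkbox or dropdown): keep known titles, store pks.
        return [options[title] for title in answer if title in options]
    if answer not in options:
        raise ValueError(f"Option {answer!r} is not valid")
    return options[answer]

options = {"option1": 1, "option2": 2}
print(convert_ground_truth('"option1"', options, multi=False))              # 1
print(convert_ground_truth('["option1", "option2"]', options, multi=True))  # [1, 2]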

@@ -842,10 +840,10 @@
             field = gt["display_set_id"]
             ground_truths[field] = ground_truths.get(field, {})
-            if (
-                gt["question__answer_type"]
-                == Question.AnswerType.MULTIPLE_CHOICE
-            ):
+            if gt["question__answer_type"] in [
+                Question.AnswerType.MULTIPLE_CHOICE,
+                Question.AnswerType.MULTIPLE_CHOICE_DROPDOWN,
+            ]:
                 human_readable_answers = [
                     options[gt["question"]].get(a, a) for a in gt["answer"]
                 ]

@@ -1077,6 +1075,7 @@
         MULTIPLE_POLYGONS = "MPOL", "Multiple polygons"
         CHOICE = "CHOI", "Choice"
         MULTIPLE_CHOICE = "MCHO", "Multiple choice"
+        MULTIPLE_CHOICE_DROPDOWN = "MCHD", "Multiple choice dropdown"
         MASK = "MASK", "Mask"
         LINE = "LINE", "Line"
         MULTIPLE_LINES = "MLIN", "Multiple lines"
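This hunk registers the new answer type with the stored value "MCHD" and the label "Multiple choice dropdown". A minimal sketch of how such a value/label pair behaves, assuming ``AnswerType`` is a standard Django ``TextChoices`` enum as the pattern suggests (the trimmed-down class below is illustrative, not the project's full definition):

from django.db import models

class AnswerType(models.TextChoices):
    # Only three members shown; the real enum defines many more answer types.
    CHOICE = "CHOI", "Choice"
    MULTIPLE_CHOICE = "MCHO", "Multiple choice"
    MULTIPLE_CHOICE_DROPDOWN = "MCHD", "Multiple choice dropdown"

# The short code is what gets stored; the label is the human-readable name.
assert AnswerType.MULTIPLE_CHOICE_DROPDOWN == "MCHD"
assert AnswerType.MULTIPLE_CHOICE_DROPDOWN.label == "Multiple choice dropdown"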

@@ -1092,6 +1091,7 @@
         return [
             AnswerType.CHOICE,
             AnswerType.MULTIPLE_CHOICE,
+            AnswerType.MULTIPLE_CHOICE_DROPDOWN,
         ]

     @staticmethod

@@ -1157,6 +1157,9 @@
             AnswerType.MULTIPLE_LINES: [InterfaceKindChoices.MULTIPLE_LINES],
             AnswerType.CHOICE: [InterfaceKindChoices.CHOICE],
             AnswerType.MULTIPLE_CHOICE: [InterfaceKindChoices.MULTIPLE_CHOICE],
+            AnswerType.MULTIPLE_CHOICE_DROPDOWN: [
+                InterfaceKindChoices.MULTIPLE_CHOICE
+            ],
             AnswerType.MASK: [
                 InterfaceKindChoices.SEGMENTATION,
             ],

@@ -1220,6 +1223,7 @@
                 QuestionWidgetKindChoices.CHECKBOX_SELECT_MULTIPLE,
                 QuestionWidgetKindChoices.SELECT_MULTIPLE,
             ],
+            AnswerType.MULTIPLE_CHOICE_DROPDOWN: [],
             AnswerType.MASK: [],
             AnswerType.LINE: [],
             AnswerType.MULTIPLE_LINES: [QuestionWidgetKindChoices.ACCEPT_REJECT],

@@ -1259,6 +1263,7 @@
             AnswerType.ELLIPSE: None,
             AnswerType.MULTIPLE_ELLIPSES: None,
             AnswerType.MULTIPLE_CHOICE: [],
+            AnswerType.MULTIPLE_CHOICE_DROPDOWN: [],
             AnswerType.THREE_POINT_ANGLE: None,
             AnswerType.MULTIPLE_THREE_POINT_ANGLES: None,
         }

@@ -1317,6 +1322,7 @@
         AnswerType.BOOL: "'true'",
         AnswerType.CHOICE: "'\"option\"'",
         AnswerType.MULTIPLE_CHOICE: '\'["option1", "option2"]\'',
+        AnswerType.MULTIPLE_CHOICE_DROPDOWN: '\'["option1", "option2"]\'',
     }

     reader_study = models.ForeignKey(
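The mapping above pairs each answer type with an example answer string; the new dropdown type reuses the JSON-encoded list of option titles already used for MULTIPLE_CHOICE. A quick check of what that example decodes to, using only the standard library:

import json

# The example added for MULTIPLE_CHOICE_DROPDOWN, with the outer single
# quotes stripped: it decodes to a plain list of option titles.
example = '["option1", "option2"]'
assert json.loads(example) == ["option1", "option2"]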

@@ -1456,7 +1462,10 @@
         Calculates the score for ``answer`` by applying ``scoring_function``
         to ``answer`` and ``ground_truth``.
         """
-        if self.answer_type == Question.AnswerType.MULTIPLE_CHOICE:
+        if self.answer_type in (
+            Question.AnswerType.MULTIPLE_CHOICE,
+            Question.AnswerType.MULTIPLE_CHOICE_DROPDOWN,
+        ):
             if len(answer) == 0 and len(ground_truth) == 0:
                 return 1.0
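The hunk above sits in the score calculation for an answer: both multiple choice variants short-circuit to a score of 1.0 when the answer and the ground truth are both empty, and otherwise fall through to the configured ``scoring_function`` (not shown in this hunk). A hypothetical ``overlap_score`` helper illustrating that empty-versus-empty special case alongside a simple set-overlap accuracy; this is not the project's scoring function:

def overlap_score(answer, ground_truth):
    # Nothing expected and nothing given counts as a perfect score,
    # mirroring the early return in the hunk above.
    if len(answer) == 0 and len(ground_truth) == 0:
        return 1.0
    union = set(answer) | set(ground_truth)
    intersection = set(answer) & set(ground_truth)
    return len(intersection) / len(union)

print(overlap_score([], []))          # 1.0
print(overlap_score([1, 2], [2, 3]))  # 0.333...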

@@ -1857,7 +1866,10 @@
                     "Provided option is not valid for this question"
                 )

-        if question.answer_type == Question.AnswerType.MULTIPLE_CHOICE:
+        if question.answer_type in (
+            Question.AnswerType.MULTIPLE_CHOICE,
+            Question.AnswerType.MULTIPLE_CHOICE_DROPDOWN,
+        ):
             if not all(x in valid_options for x in answer):
                 raise ValidationError(
                     "Provided options are not valid for this question"

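The hunk above extends answer validation: for both multiple choice variants, every submitted value has to be one of the question's valid options. The same rule as a standalone sketch, with ``valid_options`` as a plain set standing in for the queryset-derived values used by the model (which raises Django's ValidationError rather than ValueError):

def validate_multiple_choice(answer, valid_options):
    # Every selected option pk must be a valid option for the question.
    if not all(x in valid_options for x in answer):
        raise ValueError("Provided options are not valid for this question")

validate_multiple_choice([1, 3], valid_options={1, 2, 3})    # passes silently
# validate_multiple_choice([1, 4], valid_options={1, 2, 3})  # would raise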

@@ -1888,14 +1900,15 @@
                 .first()
                 or ""
             )
-
-        if self.question.answer_type == Question.AnswerType.MULTIPLE_CHOICE:
+        if self.question.answer_type in (
+            Question.AnswerType.MULTIPLE_CHOICE,
+            Question.AnswerType.MULTIPLE_CHOICE_DROPDOWN,
+        ):
             return ", ".join(
                 self.question.options.filter(pk__in=self.answer)
                 .order_by("title")
                 .values_list("title", flat=True)
             )
-
         return self.answer
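The final hunk makes the human-readable text of a dropdown answer identical to the checkbox variant: the stored option pks are resolved back to their titles, ordered by title, and joined with commas. A small sketch of that join over a hypothetical ``titles_by_pk`` dict in place of the ``question.options`` queryset:

def answer_text(answer_pks, titles_by_pk):
    # Resolve pks to titles, order by title, join for display.
    return ", ".join(
        sorted(titles_by_pk[pk] for pk in answer_pks if pk in titles_by_pk)
    )

print(answer_text([2, 1], {1: "option1", 2: "option2"}))  # option1, option2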