Skip to content

Commit

Permalink
refactor: reduce duplicate workflow update calls (openedx#2129)
Browse files Browse the repository at this point in the history
* feat: remove duplicate workflow lookups

Previously, anything that requested a workflow would cause a workflow
update and refresh. This changes that logic to:
1. Only init workflow API once through load of ORA block (reloaded each
time the page / ORA MFE load).
2. On load, update workflow.
3. Calls for workflow info reference cached workflow.
4. Try to update workflow when none found.

Note we can still manually refresh workflow info with either
get_workflow_info or update_workflow_status.

* refactor: call the API version of get_workflow_info

Within the new MFE calls, we need to consistently leverage the API version of workflow calls to take advantage of the cached workflow state.

* docs: typo fixes and docstring updates

* feat: remove no-longer-needed function_trace

* fix: cache workflow on update_workflow_status

* fix: remove unnecessary update in tests

* style: more consistent return statements

* test: refactor mocking to fix tests

* chore: bump ORA to 6.0.28
  • Loading branch information
nsprenkle authored Jan 22, 2024
1 parent 0c02eb0 commit c29b7f3
Show file tree
Hide file tree
Showing 15 changed files with 101 additions and 66 deletions.
2 changes: 1 addition & 1 deletion openassessment/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
Initialization Information for Open Assessment Module
"""

__version__ = '6.0.27'
__version__ = '6.0.28'
2 changes: 1 addition & 1 deletion openassessment/workflow/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ def update_from_assessments(
This is a little wonky from a REST, get-doesn't-change-state point of view,
except that what's stored in the `AssessmentWorkflow` isn't the canonical
true value -- it's just the most recently known state of it based on the
last known requirments. For now, we have to query for truth.
last known requirements. For now, we have to query for truth.
Args:
submission_uuid (str): Identifier for the submission the
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ def _parse_example(self, example):
if not example:
return (
{},
"No training example was returned fromt he API for student with Submission UUID {}".format(
"No training example was returned from the API for student with Submission UUID {}".format(
self._block.submission_uuid
),
)
Expand Down
3 changes: 2 additions & 1 deletion openassessment/xblock/apis/grades_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
class GradesAPI:
def __init__(self, block):
self._block = block
self.workflow_data = block.workflow_data

def _get_submission_uuid(self):
return self._block.submission_uuid
Expand All @@ -22,7 +23,7 @@ def score_overridden(self):
Returns: True if score was overridden by staff, False otherwise.
"""
workflow = self._block.get_workflow_info()
workflow = self.workflow_data.workflow
score = workflow['score']

complete = score is not None
Expand Down
3 changes: 1 addition & 2 deletions openassessment/xblock/apis/ora_data_accessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
from openassessment.xblock.apis.grades_api import GradesAPI
from openassessment.xblock.apis.ora_config_api import ORAConfigAPI
from openassessment.xblock.apis.submissions.submissions_api import SubmissionAPI
from openassessment.xblock.apis.workflow_api import WorkflowAPI
from openassessment.xblock.apis.assessments.peer_assessment_api import PeerAssessmentAPI
from openassessment.xblock.apis.assessments.self_assessment_api import SelfAssessmentAPI
from openassessment.xblock.apis.assessments.staff_assessment_api import (
Expand All @@ -27,7 +26,7 @@ def submission_data(self):

@property
def workflow_data(self):
return WorkflowAPI(self._block)
return self._block.workflow_data

@property
def grades_data(self):
Expand Down
36 changes: 29 additions & 7 deletions openassessment/xblock/apis/workflow_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,30 @@
class WorkflowAPI:
def __init__(self, block):
self._block = block
self.grades = self._block.grades_data
self._workflow = self._block.get_workflow_info()

def get_workflow_info(self, submission_uuid=None):
return self._block.get_workflow_info(submission_uuid)
"""
Update workflow info and return workflow for the submission.
NOTE - Calls workflow update and caches result. When using new ORA
experience, this needs to be called instead of the base
get_workflow_info to update the cached value correctly.
"""
self._workflow = self._block.get_workflow_info(submission_uuid)
return self._workflow

@property
def workflow(self):
return self.get_workflow_info()
"""
Getter for workflow, used to keep us from updating workflow every time
we ask for info.
NOTE - when there isn't a workflow, this will try to refresh workflow.
"""
if not self._workflow:
return self.get_workflow_info()
return self._workflow

@property
def has_workflow(self):
Expand Down Expand Up @@ -72,6 +88,12 @@ def next_incomplete_step(self):

return "done"

@property
def status(self):
if self.workflow:
return self.workflow.get("status")
return None

def has_reached_given_step(self, requested_step, current_workflow_step=None):
"""
Helper to determine if are far enough through a workflow to request data for a step.
Expand Down Expand Up @@ -140,10 +162,6 @@ def submission_uuid(self):
def workflow_requirements(self):
return self._block.workflow_requirements()

@property
def status(self):
return self.workflow.get("status")

@property
def has_received_grade(self):
return bool(self.workflow.get("score"))
Expand All @@ -158,7 +176,11 @@ def get_course_workflow_settings(self):
return self._block.get_course_workflow_settings()

def update_workflow_status(self, submission_uuid=None):
"""
Update workflow and cache result
"""
self._block.update_workflow_status(submission_uuid)
self._workflow = self.get_workflow_info()

def create_workflow(self, submission_uuid):
self._block.create_workflow(submission_uuid)
Expand Down
2 changes: 1 addition & 1 deletion openassessment/xblock/message_mixin.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def render_message(self, data, suffix=''): # pylint: disable=unused-argument
workflow = self.get_workflow_info()
deadline_info = self._get_deadline_info()

# Finds the cannonical status of the workflow and the is_closed status of the problem
# Finds the canonical status of the workflow and the is_closed status of the problem
status = workflow.get('status')
status_details = workflow.get('status_details', {})

Expand Down
7 changes: 6 additions & 1 deletion openassessment/xblock/openassessmentblock.py
Original file line number Diff line number Diff line change
Expand Up @@ -293,9 +293,14 @@ class OpenAssessmentBlock(
def config_data(self):
return ORAConfigAPI(self)

_workflow_data = None

@property
def workflow_data(self):
return WorkflowAPI(self)
# Initialize Workflow API only once
if not self._workflow_data:
self._workflow_data = WorkflowAPI(self)
return self._workflow_data

@property
def submission_data(self):
Expand Down
2 changes: 1 addition & 1 deletion openassessment/xblock/team_workflow_mixin.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ def get_team_workflow_cancellation_info(self, team_submission_uuid):
Returns cancellation information for a particular team submission.
:param team_submission_uuid: The team_submission identifier associated with the
sumbission to return information for.
submission to return information for.
:return: The cancellation information, or None if the team submission has
not been cancelled.
"""
Expand Down
31 changes: 22 additions & 9 deletions openassessment/xblock/test/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,14 @@
"""


from contextlib import contextmanager
import copy
from functools import wraps
import json
import os.path

from unittest import mock
from mock import PropertyMock, patch
from workbench.runtime import WorkbenchRuntime

import webob
Expand Down Expand Up @@ -359,7 +361,7 @@ class SubmissionTestMixin:

def create_test_submission(self, xblock, student_item=None, submission_text=None):
"""
Helper for creating test submissions
Helper for creating test submissions. Also updates workflow status.
Args:
* xblock: The XBlock to create the submission under
Expand All @@ -379,14 +381,16 @@ def create_test_submission(self, xblock, student_item=None, submission_text=None
if submission_text is None:
submission_text = self.DEFAULT_TEST_SUBMISSION_TEXT

return submissions_actions.create_submission(
submission = submissions_actions.create_submission(
student_item,
submission_text,
xblock.config_data,
xblock.submission_data,
xblock.workflow_data
)

return submission


class SubmitAssessmentsMixin(SubmissionTestMixin):
"""
Expand Down Expand Up @@ -501,16 +505,22 @@ def set_staff_access(xblock):
xblock.xmodule_runtime.anonymous_student_id = 'Bob'

@staticmethod
def set_mock_workflow_info(xblock, workflow_status, status_details, submission_uuid):
xblock.get_workflow_info = mock.Mock(return_value={
'status': workflow_status,
'status_details': status_details,
'submission_uuid': submission_uuid
})
@contextmanager
def mock_workflow_status(workflow_status, status_details, submission_uuid):
with patch(
"openassessment.xblock.apis.workflow_api.WorkflowAPI.workflow",
new_callable=PropertyMock,
) as mock_workflow:
mock_workflow.return_value = {
"status": workflow_status,
"status_details": status_details,
"submission_uuid": submission_uuid,
}
yield

def submit_staff_assessment(self, xblock, submission, assessment):
"""
Submits a staff assessment for the specified submission.
Submits a staff assessment for the specified submission and refreshes workflow
Args:
xblock: The XBlock being assessed.
Expand All @@ -522,3 +532,6 @@ def submit_staff_assessment(self, xblock, submission, assessment):
assessment['submission_uuid'] = submission['uuid']
resp = self.request(xblock, 'staff_assess', json.dumps(assessment), response_format='json')
self.assertTrue(resp['success'])

# refresh workflow status
xblock.workflow_data.update_workflow_status()
2 changes: 1 addition & 1 deletion openassessment/xblock/test/test_peer.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,10 +71,10 @@ def test_requirements_changed(self, xblock, mock_requirements):
reflect their done status with regards to the new requirements.
"""
# Setup the peer grading scenario, using the default requirements
mock_requirements.return_value = {"peer": {"must_grade": 2, "must_be_graded_by": 2}}
self._sally_and_hal_grade_each_other_helper(xblock)

# Verify that Sally's workflow is not marked done, as the requirements are higher than 1.
mock_requirements.return_value = {"peer": {"must_grade": 2, "must_be_graded_by": 2}}
workflow_info = xblock.get_workflow_info()

# peer step is skipable. So we expect next status to be current status.
Expand Down
50 changes: 23 additions & 27 deletions openassessment/xblock/test/test_staff.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,34 +95,30 @@ def _verify_grade_templates_workflow(self, xblock):

# Verify that once the required step (self assessment) is done, the staff grade is shown as complete.
status_details = {'peer': {'complete': True}}
self.set_mock_workflow_info(
xblock, workflow_status='done', status_details=status_details, submission_uuid=submission['uuid']
)
self._assert_path_and_context(
xblock,
{
'status_value': 'Complete',
'icon_class': 'fa-check',
'step_classes': 'is--showing',
'button_active': 'aria-expanded="true"',
'xblock_id': xblock.scope_ids.usage_id
}
)
with self.mock_workflow_status('done', status_details, submission["uuid"]):
self._assert_path_and_context(
xblock,
{
'status_value': 'Complete',
'icon_class': 'fa-check',
'step_classes': 'is--showing',
'button_active': 'aria-expanded="true"',
'xblock_id': xblock.scope_ids.usage_id
}
)

# Verify that if the problem is cancelled, the staff grade reflects this.
self.set_mock_workflow_info(
xblock, workflow_status='cancelled', status_details=status_details, submission_uuid=submission['uuid']
)
self._assert_path_and_context(
xblock,
{
'status_value': 'Cancelled',
'icon_class': 'fa-exclamation-triangle',
'button_active': 'disabled="disabled" aria-expanded="false"',
'step_classes': 'is--unavailable',
'xblock_id': xblock.scope_ids.usage_id
}
)
with self.mock_workflow_status('cancelled', status_details, submission['uuid']):
self._assert_path_and_context(
xblock,
{
'status_value': 'Cancelled',
'icon_class': 'fa-exclamation-triangle',
'button_active': 'disabled="disabled" aria-expanded="false"',
'step_classes': 'is--unavailable',
'xblock_id': xblock.scope_ids.usage_id
}
)

@scenario('data/grade_waiting_scenario.xml', user_id='Omar')
def test_staff_grade_templates_no_peer(self, xblock):
Expand Down Expand Up @@ -267,7 +263,7 @@ def test_assessment_error(self, xblock):
assessment['submission_uuid'] = submission['uuid']

with patch('openassessment.xblock.ui_mixins.legacy.views.staff.staff_api') as mock_api:
# Simulate a error
# Simulate an error
mock_api.create_assessment.side_effect = staff_api.StaffAssessmentRequestError
resp = self.request(xblock, 'staff_assess', json.dumps(STAFF_GOOD_ASSESSMENT), response_format='json')
self.assertFalse(resp['success'])
Expand Down
Loading

0 comments on commit c29b7f3

Please sign in to comment.