diff --git a/README.md b/README.md index 5c0c1402d7..2589105bd5 100644 --- a/README.md +++ b/README.md @@ -146,7 +146,7 @@ grant all privileges on database circ to palace; ### Redis -Redis is used as the broker for Celery. You can run Redis with docker using the following command: +Redis is used as the broker for Celery and the caching layer. You can run Redis with docker using the following command: ```sh docker run -d --name redis -p 6379:6379 redis @@ -193,6 +193,19 @@ We support overriding a number of other Celery settings via environment variable the defaults should be sufficient. The full list of settings can be found in [`service/celery/configuration.py`](src/palace/manager/service/celery/configuration.py). +#### Redis + +We use Redis as the caching layer for the application. Although you can use the same redis database for both +Celery and caching, we recommend that you use a separate database for each purpose to avoid conflicts. + +- `PALACE_REDIS_URL`: The URL of the Redis instance to use for caching. (**required**). + - for example: + ```sh + export PALACE_REDIS_URL="redis://localhost:6379/1" + ``` +- `PALACE_REDIS_KEY_PREFIX`: The prefix to use for keys stored in the Redis instance. The default is `palace`. + This is useful if you want to use the same Redis database for multiple CM (optional). + #### General - `PALACE_BASE_URL`: The base URL of the application. Used to create absolute links. 
(optional) diff --git a/docker-compose.yml b/docker-compose.yml index 9fbd5deee3..f9e801d4a2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,6 +20,7 @@ x-cm-variables: &cm PALACE_CELERY_BROKER_URL: "redis://redis:6379/0" PALACE_CELERY_BROKER_TRANSPORT_OPTIONS_GLOBAL_KEYPREFIX: "test" PALACE_CELERY_CLOUDWATCH_STATISTICS_DRYRUN: "true" + PALACE_REDIS_URL: "redis://redis:6379/1" # Set up the environment variables used for testing as well PALACE_TEST_DATABASE_URL: "postgresql://palace:test@pg:5432/circ" @@ -27,6 +28,7 @@ x-cm-variables: &cm PALACE_TEST_MINIO_URL: "http://minio:9000" PALACE_TEST_MINIO_USER: "palace" PALACE_TEST_MINIO_PASSWORD: "test123456789" + PALACE_TEST_REDIS_URL: "redis://redis:6379/2" depends_on: pg: condition: service_healthy diff --git a/poetry.lock b/poetry.lock index f27592b06d..60a93b41db 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. 
[[package]] name = "alembic" @@ -1530,8 +1530,6 @@ files = [ {file = "frozendict-2.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d13b4310db337f4d2103867c5a05090b22bc4d50ca842093779ef541ea9c9eea"}, {file = "frozendict-2.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:b3b967d5065872e27b06f785a80c0ed0a45d1f7c9b85223da05358e734d858ca"}, {file = "frozendict-2.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:4ae8d05c8d0b6134bfb6bfb369d5fa0c4df21eabb5ca7f645af95fdc6689678e"}, - {file = "frozendict-2.4.4-py311-none-any.whl", hash = "sha256:705efca8d74d3facbb6ace80ab3afdd28eb8a237bfb4063ed89996b024bc443d"}, - {file = "frozendict-2.4.4-py312-none-any.whl", hash = "sha256:d9647563e76adb05b7cde2172403123380871360a114f546b4ae1704510801e5"}, {file = "frozendict-2.4.4.tar.gz", hash = "sha256:3f7c031b26e4ee6a3f786ceb5e3abf1181c4ade92dce1f847da26ea2c96008c7"}, ] @@ -3961,13 +3959,13 @@ full = ["numpy"] [[package]] name = "redis" -version = "5.0.4" +version = "5.0.5" description = "Python client for Redis database and key-value store" optional = false python-versions = ">=3.7" files = [ - {file = "redis-5.0.4-py3-none-any.whl", hash = "sha256:7adc2835c7a9b5033b7ad8f8918d09b7344188228809c98df07af226d39dec91"}, - {file = "redis-5.0.4.tar.gz", hash = "sha256:ec31f2ed9675cc54c21ba854cfe0462e6faf1d83c8ce5944709db8a4700b9c61"}, + {file = "redis-5.0.5-py3-none-any.whl", hash = "sha256:30b47d4ebb6b7a0b9b40c1275a19b87bb6f46b3bed82a89012cf56dea4024ada"}, + {file = "redis-5.0.5.tar.gz", hash = "sha256:3417688621acf6ee368dec4a04dd95881be24efd34c79f00d31f62bb528800ae"}, ] [package.dependencies] @@ -4684,6 +4682,21 @@ files = [ {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, ] +[[package]] +name = "types-redis" +version = "4.6.0.20240425" +description = "Typing stubs for redis" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-redis-4.6.0.20240425.tar.gz", hash = 
"sha256:9402a10ee931d241fdfcc04592ebf7a661d7bb92a8dea631279f0d8acbcf3a22"}, + {file = "types_redis-4.6.0.20240425-py3-none-any.whl", hash = "sha256:ac5bc19e8f5997b9e76ad5d9cf15d0392d9f28cf5fc7746ea4a64b989c45c6a8"}, +] + +[package.dependencies] +cryptography = ">=35.0.0" +types-pyOpenSSL = "*" + [[package]] name = "types-requests" version = "2.31.0.6" @@ -5038,4 +5051,4 @@ lxml = ">=3.8" [metadata] lock-version = "2.0" python-versions = ">=3.10,<4" -content-hash = "5538037fe8bd607d0278f5688a9aa877e73140a647928f87b0f30f7823f8adce" +content-hash = "6926c293e113bb1539170fb512788115a9946d1c74d0dbd439d63fdbf2805198" diff --git a/pyproject.toml b/pyproject.toml index 48610f8419..67445dda30 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -263,6 +263,7 @@ python-dateutil = "2.9.0.post0" python3-saml = "^1.16" # python-saml is required for SAML authentication pytz = "^2023.3" pyyaml = "^6.0" +redis = "^5.0.5" redmail = "^0.6.0" requests = "^2.29" sqlalchemy = {version = "^1.4", extras = ["mypy"]} @@ -308,6 +309,7 @@ types-Pillow = "^10.0.0" types-psycopg2 = "^2.9.21" types-python-dateutil = "^2.8.19" types-pytz = "^2024.1" +types-redis = "^4.6.0.20240425" types-requests = "^2.28.11" [tool.poetry.group.pg] diff --git a/src/palace/manager/api/circulation.py b/src/palace/manager/api/circulation.py index 66403cac1f..d9292cb5d4 100644 --- a/src/palace/manager/api/circulation.py +++ b/src/palace/manager/api/circulation.py @@ -1,18 +1,18 @@ from __future__ import annotations +import dataclasses import datetime import logging from abc import ABC, abstractmethod from collections.abc import Iterable, Mapping -from threading import Thread from typing import Any, Literal, TypeVar import flask from flask import Response from flask_babel import lazy_gettext as _ from pydantic import PositiveInt -from sqlalchemy.orm import Query, Session -from sqlalchemy.orm.scoping import ScopedSession +from sqlalchemy import select +from sqlalchemy.orm import Session from 
palace.manager.api.circulation_exceptions import ( AlreadyCheckedOut, @@ -42,7 +42,6 @@ FormField, ) from palace.manager.service.analytics.analytics import Analytics -from palace.manager.service.logging.configuration import LogLevel from palace.manager.sqlalchemy.model.circulationevent import CirculationEvent from palace.manager.sqlalchemy.model.collection import Collection from palace.manager.sqlalchemy.model.datasource import DataSource @@ -58,7 +57,7 @@ from palace.manager.sqlalchemy.model.resource import Resource from palace.manager.sqlalchemy.util import get_one from palace.manager.util.datetime_helpers import utc_now -from palace.manager.util.log import LoggerMixin, log_elapsed_time +from palace.manager.util.log import LoggerMixin from palace.manager.util.problem_detail import ProblemDetail @@ -587,6 +586,8 @@ class BaseCirculationApiSettings(BaseSettings): SettingsType = TypeVar("SettingsType", bound=BaseCirculationApiSettings, covariant=True) LibrarySettingsType = TypeVar("LibrarySettingsType", bound=BaseSettings, covariant=True) +LoanOrHoldT = TypeVar("LoanOrHoldT", Loan, Hold) + class BaseCirculationAPI( HasLibraryIntegrationConfiguration[SettingsType, LibrarySettingsType], @@ -751,6 +752,11 @@ class PatronActivityCirculationAPI( were made outside the Palace platform. """ + @dataclasses.dataclass(frozen=True) + class IdentifierKey: + type: str | None + identifier: str | None + @abstractmethod def patron_activity( self, patron: Patron, pin: str | None @@ -758,79 +764,143 @@ def patron_activity( """Return a patron's current checkouts and holds.""" ... 
+ def remote_holds_and_loans( + self, patron: Patron, pin: str | None + ) -> tuple[dict[IdentifierKey, LoanInfo], dict[IdentifierKey, HoldInfo]]: + remote_loans = {} + remote_holds = {} + for activity in self.patron_activity(patron, pin): + key = self.IdentifierKey(activity.identifier_type, activity.identifier) + if isinstance(activity, LoanInfo): + remote_loans[key] = activity + elif isinstance(activity, HoldInfo): + remote_holds[key] = activity + + return remote_loans, remote_holds + + def local_loans_or_holds( + self, patron: Patron, item_cls: type[LoanOrHoldT] + ) -> dict[IdentifierKey, LoanOrHoldT]: + items = self._db.scalars( + select(item_cls) + .join(LicensePool) + .where( + LicensePool.collection_id == self.collection_id, + item_cls.patron == patron, + ) + ).all() -PatronActivityCirculationApiType = PatronActivityCirculationAPI[ - BaseCirculationApiSettings, BaseSettings -] - + items_by_identifier = {} + for item in items: + license_pool = item.license_pool + if license_pool.identifier is None: + self.log.error( + "Active loan or hold (%r) on license pool (%r), which has no identifier.", + item, + license_pool, + ) + continue -T = TypeVar("T") + key = self.IdentifierKey( + license_pool.identifier.type, + license_pool.identifier.identifier, + ) + items_by_identifier[key] = item + return items_by_identifier + + def local_loans_and_holds( + self, patron: Patron + ) -> tuple[dict[IdentifierKey, Loan], dict[IdentifierKey, Hold]]: + return self.local_loans_or_holds(patron, Loan), self.local_loans_or_holds( + patron, Hold + ) + def delete_loans_or_holds(self, loans_or_holds: Iterable[LoanOrHoldT]) -> None: + one_minute_ago = utc_now() - datetime.timedelta(minutes=1) + for item in loans_or_holds: + if item.start is not None and item.start > one_minute_ago: + # This was just created, we shouldn't delete it. 
+ continue + self.log.info( + "Deleting %s (%r) for patron (%s)" + % (item.__class__.__name__, item, item.patron.authorization_identifier) + ) + self._db.delete(item) -class PatronActivityThread(Thread, LoggerMixin): - def __init__( + def sync_loans( self, - db: Session, - api_cls: type[PatronActivityCirculationApiType], - collection_id: int | None, - patron_id: int | None, - pin: str | None, + patron: Patron, + remote_loans: Mapping[IdentifierKey, LoanInfo], + local_loans: Mapping[IdentifierKey, Loan], ) -> None: - self.db = db - self.api_cls = api_cls - self.collection_id = collection_id - self.patron_id = patron_id - self.pin = pin - self.activity: list[LoanInfo | HoldInfo] | None = None - self.error = False - super().__init__() + # Update the local loans and to match the remote loans + for identifier, loan in remote_loans.items(): + pool = loan.license_pool(self._db) + start = loan.start_date + end = loan.end_date - @property - def details(self) -> str: - return f"Api '{self.api_cls.__name__}' / Collection '{self.collection_id}' / Patron '{self.patron_id}'" + if identifier in local_loans: + # We already have the Loan object, we don't need to look + # it up again. + local_loan = local_loans[identifier] - def load_from_db(self, identifier: int | None, model: type[T]) -> T | None: - if identifier is None: - self.log.error(f"No {model.__name__} ID provided. ({self.details})") - return None - loaded_model = get_one(self.db, model, id=identifier) - if loaded_model is None: - self.log.error( - f"{model.__name__} with ID {identifier} not found. ({self.details})" - ) - return loaded_model + # But maybe the remote's opinions as to the loan's + # start or end date have changed. 
+ if start: + local_loan.start = start + if end: + local_loan.end = end + else: + local_loan, _ = pool.loan_to(patron, start, end) - @property - def patron(self) -> Patron | None: - return self.load_from_db(self.patron_id, Patron) + if loan.locked_to: + # The loan source is letting us know that the loan is + # locked to a specific delivery mechanism. Even if + # this is the first we've heard of this loan, + # it may have been created in another app or through + # a library-website integration. + loan.locked_to.apply(local_loan, autocommit=False) - @property - def collection(self) -> Collection | None: - return self.load_from_db(self.collection_id, Collection) + loans_to_delete = [ + local_loans[i] for i in local_loans.keys() - remote_loans.keys() + ] + self.delete_loans_or_holds(loans_to_delete) - def api(self, collection: Collection) -> PatronActivityCirculationApiType: - return self.api_cls(self.db, collection) + def sync_holds( + self, + patron: Patron, + remote_holds: Mapping[IdentifierKey, HoldInfo], + local_holds: Mapping[IdentifierKey, Hold], + ) -> None: + # Update the local holds to match the remote holds + for identifier, hold in remote_holds.items(): + pool = hold.license_pool(self._db) + start = hold.start_date + end = hold.end_date + position = hold.hold_position - @log_elapsed_time(log_level=LogLevel.debug) - def run(self) -> None: - try: - patron = self.patron - if patron is None: - return - - collection = self.collection - if collection is None: - return - api = self.api(collection) - self.activity = list(api.patron_activity(patron, self.pin)) - except: - self.log.exception(f"Error fetching patron activity. ({self.details})") - self.error = True - finally: - # In tests we don't have a scoped session here, however when this - # code is run normally we need to remove the session from the thread. 
- if isinstance(self.db, ScopedSession): - self.db.remove() + if identifier in local_holds: + # We already have the Hold object, we don't need to look + # it up again. + local_hold = local_holds[identifier] + + # But maybe the remote's opinions as to the hold's + # start or end date have changed. + local_hold.update(start, end, position) + else: + local_hold, _ = pool.on_hold_to(patron, start, end, position) + + holds_to_delete = [ + local_holds[i] for i in local_holds.keys() - remote_holds.keys() + ] + self.delete_loans_or_holds(holds_to_delete) + + def sync_patron_activity(self, patron: Patron, pin: str | None) -> None: + remote_loans, remote_holds = self.remote_holds_and_loans(patron, pin) + local_loans, local_holds = self.local_loans_and_holds(patron) + + self.sync_loans(patron, remote_loans, local_loans) + self.sync_holds(patron, remote_holds, local_holds) class CirculationAPI(LoggerMixin): @@ -874,16 +944,6 @@ def __init__( # associated with an API object. self.api_for_collection = library_collection_apis - # When we get our view of a patron's loans and holds, we need - # to include loans whose license pools are in one of the - # Collections we manage and whose API is capable of returning - # patron activity. - self.collection_ids_for_sync = [ - _id - for _id, api in self.api_for_collection.items() - if isinstance(api, PatronActivityCirculationAPI) - ] - @property def library(self) -> Library | None: if self.library_id is None: @@ -1024,26 +1084,6 @@ def borrow( loan_info = None hold_info = None - if existing_loan and isinstance(api, PatronActivityCirculationAPI): - # If we are able to sync patrons loans and holds from the - # remote API, we do that to see if the loan still exists. If - # it does, we still want to perform a 'checkout' operation - # on the API, because that's how loans are renewed, but - # certain error conditions (like NoAvailableCopies) mean - # something different if you already have a confirmed - # active loan. 
- - # TODO: This would be a great place to pass in only the - # single API that needs to be synced. - self.sync_bookshelf(patron, pin, force=True) - existing_loan = get_one( - self._db, - Loan, - patron=patron, - license_pool=licensepool, - on_multiple="interchangeable", - ) - new_loan = False # Some exceptions may be raised during the borrow process even @@ -1355,7 +1395,6 @@ def fulfill( pin: str, licensepool: LicensePool, delivery_mechanism: LicensePoolDeliveryMechanism, - sync_on_failure: bool = True, ) -> FulfillmentInfo: """Fulfil a book that a patron has previously checked out. @@ -1383,20 +1422,7 @@ def fulfill( if not loan and not self.can_fulfill_without_loan( patron, licensepool, delivery_mechanism ): - if sync_on_failure and isinstance(api, PatronActivityCirculationAPI): - # Sync and try again. - # TODO: Pass in only the single collection or LicensePool - # that needs to be synced. - self.sync_bookshelf(patron, pin, force=True) - return self.fulfill( - patron, - pin, - licensepool=licensepool, - delivery_mechanism=delivery_mechanism, - sync_on_failure=False, - ) - else: - raise NoActiveLoan(_("Cannot find your active loan for this work.")) + raise NoActiveLoan(_("Cannot find your active loan for this work.")) if ( loan and loan.fulfillment is not None @@ -1467,7 +1493,6 @@ def revoke_loan( __transaction = self._db.begin_nested() logging.info(f"In revoke_loan(), deleting loan #{loan.id}") self._db.delete(loan) - patron.last_loan_activity_sync = None __transaction.commit() # Send out an analytics event to record the fact that @@ -1504,7 +1529,6 @@ def release_hold( if hold: __transaction = self._db.begin_nested() self._db.delete(hold) - patron.last_loan_activity_sync = None __transaction.commit() # Send out an analytics event to record the fact that @@ -1518,253 +1542,6 @@ def release_hold( return True - @log_elapsed_time( - message_prefix="patron_activity sync", log_level=LogLevel.debug, skip_start=True - ) - def patron_activity( - self, patron: Patron, 
pin: str | None - ) -> tuple[list[LoanInfo], list[HoldInfo], bool]: - """Return a record of the patron's current activity - vis-a-vis all relevant external loan sources. - - We check each source in a separate thread for speed. - - :return: A 2-tuple (loans, holds) containing `HoldInfo` and - `LoanInfo` objects. - """ - threads = [] - for api in list(self.api_for_collection.values()): - if isinstance(api, PatronActivityCirculationAPI): - api_cls = type(api) - collection_id = api.collection_id - thread = PatronActivityThread( - self._db, api_cls, collection_id, patron.id, pin - ) - threads.append(thread) - for thread in threads: - thread.start() - for thread in threads: - thread.join() - loans: list[LoanInfo] = [] - holds: list[HoldInfo] = [] - complete = True - for thread in threads: - if thread.error: - # Something went wrong, so we don't have a complete - # picture of the patron's loans. - complete = False - if thread.activity: - for i in thread.activity: - if not isinstance(i, (LoanInfo, HoldInfo)): - self.log.warning( # type: ignore[unreachable] - "value %r from patron_activity is neither a loan nor a hold.", - i, - ) - continue - - if isinstance(i, LoanInfo): - loans.append(i) - elif isinstance(i, HoldInfo): - holds.append(i) - - return loans, holds, complete - - def local_loans(self, patron: Patron) -> Query[Loan]: - return ( - self._db.query(Loan) - .join(Loan.license_pool) - .filter(LicensePool.collection_id.in_(self.collection_ids_for_sync)) - .filter(Loan.patron == patron) - ) - - def local_holds(self, patron: Patron) -> Query[Hold]: - return ( - self._db.query(Hold) - .join(Hold.license_pool) - .filter(LicensePool.collection_id.in_(self.collection_ids_for_sync)) - .filter(Hold.patron == patron) - ) - - def sync_bookshelf( - self, patron: Patron, pin: str | None, force: bool = False - ) -> tuple[list[Loan] | Query[Loan], list[Hold] | Query[Hold]]: - """Sync our internal model of a patron's bookshelf with any external - vendors that provide books to the 
patron's library. - - :param patron: A Patron. - :param pin: The password authenticating the patron; used by some vendors - that perform a cross-check against the library ILS. - :param force: If this is True, the method will call out to external - vendors even if it looks like the system has up-to-date information - about the patron. - """ - # Get our internal view of the patron's current state. - local_loans = self.local_loans(patron) - local_holds = self.local_holds(patron) - - if patron and patron.last_loan_activity_sync and not force: - # Our local data is considered fresh, so we can return it - # without calling out to the vendor APIs. - return local_loans, local_holds - - # Assuming everything goes well, we will set - # Patron.last_loan_activity_sync to this value -- the moment - # just before we started contacting the vendor APIs. - last_loan_activity_sync: datetime.datetime | None = utc_now() - - # Update the external view of the patron's current state. - remote_loans, remote_holds, complete = self.patron_activity(patron, pin) - __transaction = self._db.begin_nested() - - if not complete: - # We were not able to get a complete picture of the - # patron's loan activity. Until we are able to do that, we - # should never assume that our internal model of the - # patron's loans is good enough to cache. 
- last_loan_activity_sync = None - - now = utc_now() - local_loans_by_identifier = {} - local_holds_by_identifier = {} - for l in local_loans: - if not l.license_pool: - self.log.error("Active loan with no license pool!") - continue - i = l.license_pool.identifier - if not i: - self.log.error( - "Active loan on license pool %s, which has no identifier!", - l.license_pool, - ) - continue - key = (i.type, i.identifier) - local_loans_by_identifier[key] = l - for h in local_holds: - if not h.license_pool: - self.log.error("Active hold with no license pool!") - continue - i = h.license_pool.identifier - if not i: - self.log.error( - "Active hold on license pool %r, which has no identifier!", - h.license_pool, - ) - continue - key = (i.type, i.identifier) - local_holds_by_identifier[key] = h - - active_loans = [] - active_holds = [] - start: datetime.datetime | None - end: datetime.datetime | None - for loan in remote_loans: - # This is a remote loan. Find or create the corresponding - # local loan. - pool = loan.license_pool(self._db) - start = loan.start_date - end = loan.end_date - key = (loan.identifier_type, loan.identifier) - if key in local_loans_by_identifier: - # We already have the Loan object, we don't need to look - # it up again. - local_loan = local_loans_by_identifier[key] - - # But maybe the remote's opinions as to the loan's - # start or end date have changed. - if start: - local_loan.start = start - if end: - local_loan.end = end - else: - local_loan, new = pool.loan_to(patron, start, end) - - if loan.locked_to: - # The loan source is letting us know that the loan is - # locked to a specific delivery mechanism. Even if - # this is the first we've heard of this loan, - # it may have been created in another app or through - # a library-website integration. - loan.locked_to.apply(local_loan, autocommit=False) - active_loans.append(local_loan) - - # Check the local loan off the list we're keeping so we - # don't delete it later. 
- key = (loan.identifier_type, loan.identifier) - if key in local_loans_by_identifier: - del local_loans_by_identifier[key] - - for hold in remote_holds: - # This is a remote hold. Find or create the corresponding - # local hold. - pool = hold.license_pool(self._db) - start = hold.start_date - end = hold.end_date - position = hold.hold_position - key = (hold.identifier_type, hold.identifier) - if key in local_holds_by_identifier: - # We already have the Hold object, we don't need to look - # it up again. - local_hold = local_holds_by_identifier[key] - - # But maybe the remote's opinions as to the hold's - # start or end date have changed. - local_hold.update(start, end, position) - else: - local_hold, new = pool.on_hold_to(patron, start, end, position) - active_holds.append(local_hold) - - # Check the local hold off the list we're keeping so that - # we don't delete it later. - if key in local_holds_by_identifier: - del local_holds_by_identifier[key] - - # We only want to delete local loans and holds if we were able to - # successfully sync with all the providers. If there was an error, - # the provider might still know about a loan or hold that we don't - # have in the remote lists. - if complete: - # Every loan remaining in loans_by_identifier is a hold that - # the provider doesn't know about. This usually means it's expired - # and we should get rid of it, but it's possible the patron is - # borrowing a book and syncing their bookshelf at the same time, - # and the local loan was created after we got the remote loans. - # If the loan's start date is less than a minute ago, we'll keep it. 
- for local_loan in list(local_loans_by_identifier.values()): - if ( - local_loan.license_pool.collection_id - in self.collection_ids_for_sync - ): - one_minute_ago = utc_now() - datetime.timedelta(minutes=1) - if local_loan.start is None or local_loan.start < one_minute_ago: - logging.info( - "In sync_bookshelf for patron %s, deleting loan %s (patron %s)" - % ( - patron.authorization_identifier, - str(local_loan.id), - local_loan.patron.authorization_identifier, - ) - ) - self._db.delete(local_loan) - else: - logging.info( - "In sync_bookshelf for patron %s, found local loan %s created in the past minute that wasn't in remote loans" - % (patron.authorization_identifier, str(local_loan.id)) - ) - - # Every hold remaining in holds_by_identifier is a hold that - # the provider doesn't know about, which means it's expired - # and we should get rid of it. - for local_hold in list(local_holds_by_identifier.values()): - if ( - local_hold.license_pool.collection_id - in self.collection_ids_for_sync - ): - self._db.delete(local_hold) - - # Now that we're in sync (or not), set last_loan_activity_sync - # to the conservative value obtained earlier. 
- if patron: - patron.last_loan_activity_sync = last_loan_activity_sync - - __transaction.commit() - return active_loans, active_holds + def supports_patron_activity(self, pool: LicensePool) -> bool: + api = self.api_for_license_pool(pool) + return isinstance(api, PatronActivityCirculationAPI) diff --git a/src/palace/manager/api/circulation_manager.py b/src/palace/manager/api/circulation_manager.py index 99482845eb..36d569c7e3 100644 --- a/src/palace/manager/api/circulation_manager.py +++ b/src/palace/manager/api/circulation_manager.py @@ -40,6 +40,9 @@ ) from palace.manager.service.analytics.analytics import Analytics from palace.manager.service.container import Services +from palace.manager.service.integration_registry.license_providers import ( + LicenseProvidersRegistry, +) from palace.manager.service.logging.configuration import LogLevel from palace.manager.sqlalchemy.model.collection import Collection from palace.manager.sqlalchemy.model.discovery_service_registration import ( @@ -259,10 +262,12 @@ def load_settings(self): message_prefix="load_settings - create collection apis", ): collection_apis = {} - registry = self.services.integration_registry.license_providers() + registry: LicenseProvidersRegistry = ( + self.services.integration_registry.license_providers() + ) for collection in collections: try: - api = registry[collection.protocol](self._db, collection) + api = registry.from_collection(self._db, collection) collection_apis[collection.id] = api except CannotLoadConfiguration as exception: self.log.exception( diff --git a/src/palace/manager/api/controller/circulation_manager.py b/src/palace/manager/api/controller/circulation_manager.py index 46691eb65f..2f69de8042 100644 --- a/src/palace/manager/api/controller/circulation_manager.py +++ b/src/palace/manager/api/controller/circulation_manager.py @@ -1,12 +1,8 @@ from __future__ import annotations -from datetime import datetime -from email.utils import parsedate_to_datetime from typing import TypeVar 
import flask -import pytz -from flask import Response from flask_babel import lazy_gettext as _ from sqlalchemy import select from sqlalchemy.orm import Session, eagerload @@ -23,6 +19,7 @@ ) from palace.manager.core.problem_details import INVALID_INPUT from palace.manager.search.external_search import ExternalSearchIndex +from palace.manager.service.redis.redis import Redis from palace.manager.sqlalchemy.model.collection import Collection from palace.manager.sqlalchemy.model.identifier import Identifier from palace.manager.sqlalchemy.model.integration import ( @@ -100,46 +97,9 @@ def search_engine(self) -> ExternalSearchIndex | ProblemDetail: ) return search_engine # type: ignore[no-any-return] - def handle_conditional_request( - self, last_modified: datetime | None = None - ) -> Response | None: - """Handle a conditional HTTP request. - - :param last_modified: A datetime representing the time this - resource was last modified. - - :return: a Response, if the incoming request can be handled - conditionally. Otherwise, None. - """ - if not last_modified: - return None - - # If-Modified-Since values have resolution of one second. If - # last_modified has millisecond resolution, change its - # resolution to one second. - if last_modified.microsecond: - last_modified = last_modified.replace(microsecond=0) - - if_modified_since = flask.request.headers.get("If-Modified-Since") - if not if_modified_since: - return None - - try: - parsed_if_modified_since = parsedate_to_datetime(if_modified_since) - except ValueError: - return None - if not parsed_if_modified_since: - return None - - # "[I]f the date is conforming to the RFCs it will represent a - # time in UTC but with no indication of the actual source - # timezone of the message the date comes from." 
- if parsed_if_modified_since.tzinfo is None: - parsed_if_modified_since = parsed_if_modified_since.replace(tzinfo=pytz.UTC) - - if parsed_if_modified_since >= last_modified: - return Response(status=304) - return None + @property + def redis_client(self) -> Redis: + return self.manager.services.redis.client() # type: ignore[no-any-return] def load_lane(self, lane_identifier: int | None) -> Lane | WorkList | ProblemDetail: """Turn user input into a Lane object.""" diff --git a/src/palace/manager/api/controller/loan.py b/src/palace/manager/api/controller/loan.py index c37cb62cd8..f7bfd3973e 100644 --- a/src/palace/manager/api/controller/loan.py +++ b/src/palace/manager/api/controller/loan.py @@ -24,8 +24,10 @@ NO_ACTIVE_LOAN_OR_HOLD, NO_LICENSES, ) +from palace.manager.celery.tasks.patron_activity import sync_patron_activity from palace.manager.core.problem_details import INTERNAL_SERVER_ERROR from palace.manager.feed.acquisition import OPDSAcquisitionFeed +from palace.manager.service.redis.models.patron_activity import PatronActivity from palace.manager.sqlalchemy.model.datasource import DataSource from palace.manager.sqlalchemy.model.library import Library from palace.manager.sqlalchemy.model.licensing import ( @@ -58,32 +60,17 @@ def sync(self) -> Response: self.log.exception(f"Could not parse refresh query parameter.") refresh = True - # Save some time if we don't believe the patron's loans or holds have - # changed since the last time the client requested this feed. - response = self.handle_conditional_request(patron.last_loan_activity_sync) - if isinstance(response, Response): - return response - - # TODO: SimplyE used to make a HEAD request to the bookshelf feed - # as a quick way of checking authentication. Does this still happen? - # It shouldn't -- the patron profile feed should be used instead. - # If it's not used, we can take this out. 
- if flask.request.method == "HEAD": - return Response() - - # First synchronize our local list of loans and holds with all - # third-party loan providers. + # Queue up tasks to sync the patron's activity with any third-party providers, + # that need to be synced. We don't wait for the task to complete, so we can return + # the feed immediately. If our knowledge of the loans is out of date, the patron will + # see the updated information when they refresh the page. if patron.authorization_identifier and refresh: header = self.authorization_header() credential = self.manager.auth.get_credential_from_header(header) - try: - self.circulation.sync_bookshelf(patron, credential) - except Exception as e: - # If anything goes wrong, omit the sync step and just - # display the current active loans, as we understand them. - self.manager.log.error( - "ERROR DURING SYNC for %s: %r", patron.id, e, exc_info=e - ) + for collection in PatronActivity.collections_ready_for_sync( + self.redis_client, patron + ): + sync_patron_activity.apply_async((collection.id, patron.id, credential)) # Then make the feed. feed = OPDSAcquisitionFeed.active_loans_for(self.circulation, patron) @@ -93,9 +80,6 @@ def sync(self) -> Response: mime_types=flask.request.accept_mimetypes, ) - last_modified = patron.last_loan_activity_sync - if last_modified: - response.last_modified = last_modified return response def borrow( @@ -141,11 +125,22 @@ def borrow( return loan_or_hold_or_pd loan_or_hold = loan_or_hold_or_pd - # At this point we have either a loan or a hold. If a loan, serve - # a feed that tells the patron how to fulfill the loan. If a hold, - # serve a feed that talks about the hold. - # We also need to drill in the Accept header, so that it eventually - # gets sent to core.feed.opds.BaseOPDSFeed.entry_as_response + # At this point we have either a loan or a hold. + + # If it is a new loan or hold, queue up a task to sync the patron's activity with the remote. 
+ # This way we are sure we have the most up-to-date information. + if is_new and self.circulation.supports_patron_activity( + loan_or_hold.license_pool + ): + sync_patron_activity.apply_async( + (loan_or_hold.license_pool.collection.id, patron.id, credential), + {"force": True}, + countdown=5, + ) + + # If we have a loan, serve a feed that tells the patron how to fulfill the loan. If a hold, + # serve a feed that talks about the hold. We also need to drill in the Accept header, so that + # it eventually gets sent to core.feed.opds.BaseOPDSFeed.entry_as_response response_kwargs = { "status": 201 if is_new else 200, "mime_types": flask.request.accept_mimetypes, @@ -511,6 +506,16 @@ def revoke(self, license_pool_id: int) -> OPDSEntryResponse | ProblemDetail: except (CirculationException, RemoteInitiatedServerError) as e: return e.problem_detail + # At this point we have successfully revoked the loan or hold. + # If the api supports it, queue up a task to sync the patron's activity with the remote. + # That way we are sure we have the most up-to-date information. 
+ if self.circulation.supports_patron_activity(pool): + sync_patron_activity.apply_async( + (pool.collection.id, patron.id, credential), + {"force": True}, + countdown=5, + ) + work = pool.work annotator = self.manager.annotator(None) single_entry_feed = OPDSAcquisitionFeed.single_entry(work, annotator) diff --git a/src/palace/manager/api/opds_for_distributors.py b/src/palace/manager/api/opds_for_distributors.py index ee1cde8753..77a895be29 100644 --- a/src/palace/manager/api/opds_for_distributors.py +++ b/src/palace/manager/api/opds_for_distributors.py @@ -9,11 +9,7 @@ from flask_babel import lazy_gettext as _ from sqlalchemy.orm import Session -from palace.manager.api.circulation import ( - FulfillmentInfo, - LoanInfo, - PatronActivityCirculationAPI, -) +from palace.manager.api.circulation import BaseCirculationAPI, FulfillmentInfo, LoanInfo from palace.manager.api.circulation_exceptions import ( CannotFulfill, DeliveryMechanismError, @@ -80,9 +76,7 @@ class OPDSForDistributorsLibrarySettings(BaseSettings): class OPDSForDistributorsAPI( - PatronActivityCirculationAPI[ - OPDSForDistributorsSettings, OPDSForDistributorsLibrarySettings - ], + BaseCirculationAPI[OPDSForDistributorsSettings, OPDSForDistributorsLibrarySettings], HasCollectionSelfTests, ): BEARER_TOKEN_CREDENTIAL_TYPE = "OPDS For Distributors Bearer Token" @@ -356,29 +350,6 @@ def fulfill( content_expires=credential.expires, ) - def patron_activity( - self, patron: Patron, pin: str | None - ) -> list[LoanInfo | HoldInfo]: - # Look up loans for this collection in the database. 
- _db = Session.object_session(patron) - loans = ( - _db.query(Loan) - .join(Loan.license_pool) - .filter(LicensePool.collection_id == self.collection_id) - .filter(Loan.patron == patron) - ) - return [ - LoanInfo( - loan.license_pool.collection, - loan.license_pool.data_source.name, - loan.license_pool.identifier.type, - loan.license_pool.identifier.identifier, - loan.start, - loan.end, - ) - for loan in loans - ] - def release_hold(self, patron: Patron, pin: str, licensepool: LicensePool) -> None: # All the books for this integration are available as simultaneous # use, so there's no need to release a hold. diff --git a/src/palace/manager/celery/tasks/patron_activity.py b/src/palace/manager/celery/tasks/patron_activity.py new file mode 100644 index 0000000000..327a52b8f3 --- /dev/null +++ b/src/palace/manager/celery/tasks/patron_activity.py @@ -0,0 +1,76 @@ +from celery import shared_task + +from palace.manager.api.circulation import PatronActivityCirculationAPI +from palace.manager.celery.task import Task +from palace.manager.service.celery.celery import QueueNames +from palace.manager.service.integration_registry.license_providers import ( + LicenseProvidersRegistry, +) +from palace.manager.service.redis.models.patron_activity import PatronActivity +from palace.manager.service.redis.redis import Redis +from palace.manager.sqlalchemy.model.collection import Collection +from palace.manager.sqlalchemy.model.patron import Patron +from palace.manager.sqlalchemy.util import get_one + + +@shared_task(queue=QueueNames.high, bind=True) +def sync_patron_activity( + task: Task, collection_id: int, patron_id: int, pin: str | None, force: bool = False +) -> None: + redis_client: Redis = task.services.redis.client() + patron_activity_status = PatronActivity( + redis_client, collection_id, patron_id, task.request.id + ) + + if force: + # If force is True, try to clear the status before continuing with the sync. 
+ patron_activity_status.clear() + + with patron_activity_status as acquired: + if not acquired: + status = patron_activity_status.status() + state = status.state.name if status is not None else "UNKNOWN" + task.log.info( + f"Patron activity sync task could not acquire lock. Task will not " + f"perform sync. Lock state ({state}) for patron (id: {patron_id}) and " + f"collection (id: {collection_id})." + ) + return + + with task.session() as session: + patron = get_one(session, Patron, id=patron_id) + collection = get_one(session, Collection, id=collection_id) + + if patron is None: + task.log.error( + f"Patron (id: {patron_id}) not found. Marking patron activity as failed." + ) + patron_activity_status.fail() + return + + if collection is None: + task.log.error( + f"Collection (id: {collection_id}) not found. Marking patron activity as failed." + ) + patron_activity_status.fail() + return + + registry: LicenseProvidersRegistry = ( + task.services.integration_registry.license_providers() + ) + api = registry.from_collection(session, collection) + + if not isinstance(api, PatronActivityCirculationAPI): + # Set the status to not supported, and log that we can't sync patron activity. + patron_activity_status.not_supported() + task.log.info( + f"Collection '{collection.name}' (id: {collection_id}) does not support patron activity sync." + ) + return + + api.sync_patron_activity(patron, pin) + + task.log.info( + f"Patron activity sync for patron '{patron.authorization_identifier}' (id: {patron_id}) " + f"and collection '{collection.name}' (id: {collection_id}) complete." 
+ ) diff --git a/src/palace/manager/service/container.py b/src/palace/manager/service/container.py index 2f5041e441..10d92c20ee 100644 --- a/src/palace/manager/service/container.py +++ b/src/palace/manager/service/container.py @@ -15,6 +15,8 @@ ) from palace.manager.service.logging.configuration import LoggingConfiguration from palace.manager.service.logging.container import Logging +from palace.manager.service.redis.configuration import RedisConfiguration +from palace.manager.service.redis.container import RedisContainer from palace.manager.service.search.configuration import SearchConfiguration from palace.manager.service.search.container import Search from palace.manager.service.sitewide import SitewideConfiguration @@ -65,6 +67,11 @@ class Services(DeclarativeContainer): IntegrationRegistryContainer, ) + redis = Container( + RedisContainer, + config=config.redis, + ) + def wire_container(container: Services) -> None: container.wire( @@ -97,6 +104,7 @@ def create_container() -> Services: "email": EmailConfiguration().dict(), "celery": CeleryConfiguration().dict(), "fcm": FcmConfiguration().dict(), + "redis": RedisConfiguration().dict(), } ) wire_container(container) diff --git a/src/palace/manager/service/integration_registry/license_providers.py b/src/palace/manager/service/integration_registry/license_providers.py index 8b69c20b25..917bfc51fe 100644 --- a/src/palace/manager/service/integration_registry/license_providers.py +++ b/src/palace/manager/service/integration_registry/license_providers.py @@ -2,11 +2,14 @@ from typing import TYPE_CHECKING +from sqlalchemy.orm import Session + from palace.manager.integration.goals import Goals from palace.manager.service.integration_registry.base import IntegrationRegistry if TYPE_CHECKING: - from palace.manager.api.circulation import CirculationApiType # noqa: autoflake + from palace.manager.api.circulation import CirculationApiType + from palace.manager.sqlalchemy.model.collection import Collection class 
LicenseProvidersRegistry(IntegrationRegistry["CirculationApiType"]): @@ -32,3 +35,9 @@ def __init__(self) -> None: self.register(ODL2API, canonical=ODL2API.label()) self.register(OPDSAPI, canonical=OPDSAPI.label()) self.register(OPDS2API, canonical=OPDS2API.label()) + + def from_collection( + self, db: Session, collection: Collection + ) -> CirculationApiType: + impl_cls = self[collection.protocol] + return impl_cls(db, collection) diff --git a/src/palace/manager/service/redis/__init__.py b/src/palace/manager/service/redis/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/palace/manager/service/redis/configuration.py b/src/palace/manager/service/redis/configuration.py new file mode 100644 index 0000000000..1dab0a39df --- /dev/null +++ b/src/palace/manager/service/redis/configuration.py @@ -0,0 +1,13 @@ +from pydantic import RedisDsn + +from palace.manager.service.configuration.service_configuration import ( + ServiceConfiguration, +) + + +class RedisConfiguration(ServiceConfiguration): + url: RedisDsn + key_prefix: str = "palace" + + class Config: + env_prefix = "PALACE_REDIS_" diff --git a/src/palace/manager/service/redis/container.py b/src/palace/manager/service/redis/container.py new file mode 100644 index 0000000000..1bbef5cfb4 --- /dev/null +++ b/src/palace/manager/service/redis/container.py @@ -0,0 +1,22 @@ +import redis +from dependency_injector import providers +from dependency_injector.containers import DeclarativeContainer + +from palace.manager.service.redis.key import RedisKeyGenerator +from palace.manager.service.redis.redis import Redis + + +class RedisContainer(DeclarativeContainer): + config = providers.Configuration() + + connection_pool: providers.Provider[redis.ConnectionPool] = providers.Singleton( + redis.ConnectionPool.from_url, url=config.url, decode_responses=True + ) + + key_generator: providers.Provider[RedisKeyGenerator] = providers.Singleton( + RedisKeyGenerator, prefix=config.key_prefix + ) + + client: 
providers.Provider[Redis] = providers.Singleton( + Redis, connection_pool=connection_pool, key_generator=key_generator + ) diff --git a/src/palace/manager/service/redis/exception.py b/src/palace/manager/service/redis/exception.py new file mode 100644 index 0000000000..4159e6aca9 --- /dev/null +++ b/src/palace/manager/service/redis/exception.py @@ -0,0 +1,9 @@ +from palace.manager.core.exceptions import BasePalaceException + + +class RedisKeyError(BasePalaceException, TypeError): + ... + + +class RedisValueError(BasePalaceException, ValueError): + ... diff --git a/src/palace/manager/service/redis/key.py b/src/palace/manager/service/redis/key.py new file mode 100644 index 0000000000..17d548a72c --- /dev/null +++ b/src/palace/manager/service/redis/key.py @@ -0,0 +1,50 @@ +from typing import Protocol, runtime_checkable + +from sqlalchemy.orm import Mapped + +from palace.manager.service.redis.exception import RedisKeyError + + +class RedisKeyMixin: + id: Mapped[int] + + def redis_key(self) -> str: + return self.redis_key_from_id(self.id) + + @classmethod + def redis_key_from_id(cls, id_: int | None) -> str: + cls_name = cls.__name__ + + if id_ is None: + raise RedisKeyError(f"{cls_name} must have an id to generate a redis key.") + + return f"{cls_name}{RedisKeyGenerator.SEPERATOR}{id_}" + + +@runtime_checkable +class SupportsRedisKey(Protocol): + def redis_key(self) -> str: + ... 
+ + +class RedisKeyGenerator: + SEPERATOR = "::" + + def __init__(self, prefix: str): + self.prefix = prefix + + def _stringify(self, key: SupportsRedisKey | str | int) -> str: + if isinstance(key, SupportsRedisKey): + return key.redis_key() + elif isinstance(key, str): + return key + elif isinstance(key, int): + return str(key) + else: + raise RedisKeyError( + f"Unsupported key type: {key} ({key.__class__.__name__})" + ) + + def __call__(self, *args: SupportsRedisKey | str | int) -> str: + key_strings = [self._stringify(k) for k in args] + return self.SEPERATOR.join([self.prefix, *key_strings]) diff --git a/src/palace/manager/service/redis/models/__init__.py b/src/palace/manager/service/redis/models/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/palace/manager/service/redis/models/patron_activity.py b/src/palace/manager/service/redis/models/patron_activity.py new file mode 100644 index 0000000000..3b4d79b5ad --- /dev/null +++ b/src/palace/manager/service/redis/models/patron_activity.py @@ -0,0 +1,393 @@ +import datetime +import sys +from functools import cached_property +from types import TracebackType +from typing import Any, Literal, NamedTuple + +from typing_extensions import Self + +from palace.manager.core.exceptions import BasePalaceException +from palace.manager.service.redis.redis import Redis +from palace.manager.sqlalchemy.model.collection import Collection +from palace.manager.sqlalchemy.model.patron import Patron +from palace.manager.util.datetime_helpers import utc_now +from palace.manager.util.log import LoggerMixin + +# TODO: Remove this when we drop support for Python 3.10 +if sys.version_info >= (3, 11): + from enum import StrEnum +else: + from backports.strenum import StrEnum + + +class PatronActivityError(BasePalaceException, RuntimeError): + ... + + +class PatronActivityStatus: + """ + Serialize and deserialize the status of a patron activity sync task from a string + stored in Redis. 
The string consists of three fields separated by cls.SEPERATOR: + - The state of the task. + - The timestamp of the task. + - The task id. + + The state is a one character string representing the state of the task. The timestamp + is a 19 character string representing the time the task was last updated in UTC. It is + stored in ISO 8601 format with second precision without a timezone. The task id is a + string that uniquely identifies the task. + """ + + class State(StrEnum): + """ + The state of a patron activity sync. + + The value of the enum is what gets stored in redis for the state. + """ + + # The task is currently running. + LOCKED = "L" + # The task failed to complete. + FAILED = "F" + # The task completed successfully. + SUCCESS = "S" + # The api does not support patron activity sync. + NOT_SUPPORTED = "N" + + class _FieldOffset(NamedTuple): + """ + A class to manage the offsets of fields in the redis string representation. + + This helper is here because when we slice a string in Python, the end index is + exclusive. However, when we use the redis GETRANGE command, the end index is + inclusive. This class helps us manage the difference between the two. + + The start index is inclusive and the end index is exclusive, as in Python. The + redis_end property returns the end index in the format expected by the GETRANGE + command. + """ + + start: int + end: int | None + + @property + def slice(self) -> slice: + return slice(self.start, self.end) + + @property + def redis_end(self) -> int: + """ + Get the end index in the format expected by redis GETRANGE. + """ + return self.end - 1 if self.end is not None else -1 + + @property + def redis(self) -> str: + """ + Get the start and end index as a string, that can be directly used in a redis + GETRANGE command. 
+ """ + return f"{self.start}, {self.redis_end}" + + STATE_FIELD_LEN = 1 + TIMESTAMP_FIELD_LEN = 19 + + SEPERATOR = "::" + SEPERATOR_LEN = len(SEPERATOR) + + STATE_OFFSET = _FieldOffset(0, STATE_FIELD_LEN) + TIMESTAMP_OFFSET = _FieldOffset( + STATE_FIELD_LEN + SEPERATOR_LEN, + STATE_FIELD_LEN + TIMESTAMP_FIELD_LEN + SEPERATOR_LEN, + ) + TASK_ID_OFFSET = _FieldOffset( + STATE_FIELD_LEN + TIMESTAMP_FIELD_LEN + 2 * SEPERATOR_LEN, None + ) + + def __init__( + self, + *, + state: State, + task_id: str, + timestamp: datetime.datetime | None = None, + ): + self.state = state + self.task_id = task_id + + self.timestamp = timestamp or utc_now() + if self.timestamp.tzinfo is None: + raise ValueError("Timestamp must be timezone aware.") + + @classmethod + def from_redis(cls, data: str) -> Self: + state = data[cls.STATE_OFFSET.slice] + timestamp = data[cls.TIMESTAMP_OFFSET.slice] + aware_timestamp = datetime.datetime.fromisoformat(timestamp).replace( + tzinfo=datetime.timezone.utc + ) + task_id = data[cls.TASK_ID_OFFSET.slice] + + return cls( + state=cls.State(state), + task_id=task_id, + timestamp=aware_timestamp, + ) + + def to_redis(self) -> str: + state_str = str(self.state) + if len(state_str) != self.STATE_FIELD_LEN: + raise ValueError( + f"State field is not the correct length: {state_str}. Expected {self.STATE_FIELD_LEN} characters." + ) + + # Convert the timestamp to UTC before converting to a string. + utc_local = self.timestamp.astimezone(datetime.timezone.utc).replace( + tzinfo=None + ) + timestamp_str = utc_local.isoformat(timespec="seconds") + if len(timestamp_str) != self.TIMESTAMP_FIELD_LEN: + raise ValueError( + f"Timestamp field is not the correct length: {timestamp_str}. Expected {self.TIMESTAMP_FIELD_LEN} characters." 
+ ) + + return self.SEPERATOR.join([state_str, timestamp_str, self.task_id]) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, PatronActivityStatus): + return NotImplemented + + return ( + self.state == other.state + and self.task_id == other.task_id + # Since we truncate the timestamp to the second when converting to a string, + # we need to compare the timestamps with a second of tolerance. + and self.timestamp - other.timestamp < datetime.timedelta(seconds=1) + ) + + +class PatronActivity(LoggerMixin): + """ + A class to manage the status of a patron activity sync in Redis. + + It provides a locking mechanism, so only one task updates the patron activity at a time, + it also provides a state, so we know if the task is running, failed, succeeded, or not supported. + Each state change stores a timestamp, so we know when the task was last updated. + + Each status is stored in redis with a timeout, so that eventually, the status will be cleared. + + The design of the locking mechanism is inspired by the Redis documentation on distributed locks: + https://redis.io/docs/latest/develop/use/patterns/distributed-locks/#correct-implementation-with-a-single-instance + """ + + LOCKED_TIMEOUT = 60 * 15 # 15 minutes + FAILED_TIMEOUT = 60 * 60 * 4 # 4 hours + SUCCESS_TIMEOUT = 60 * 60 * 12 # 12 hours + NOT_SUPPORTED_TIMEOUT = 60 * 60 * 24 * 14 # 2 week + + # We use a lua script so that we can atomically check the status and then update it + # without worrying about race conditions. The script checks that the state is + # LOCKED and the task_id matches before updating the status. 
+ UPDATE_SCRIPT = f""" + if + redis.call('GETRANGE', KEYS[1], {PatronActivityStatus.STATE_OFFSET.redis}) == '{PatronActivityStatus.State.LOCKED}' + and redis.call('GETRANGE', KEYS[1], {PatronActivityStatus.TASK_ID_OFFSET.redis}) == ARGV[1] + then + return redis.call('SET', KEYS[1], ARGV[2], 'EX', ARGV[3]) + else + return nil + end + """ + + # Again, we use a lua script so we can atomically check and update the status. + # This script will delete the key if the status is not locked, or if the task_id + # matches the task_id that owns the lock. + CLEAR_SCRIPT = f""" + if + redis.call('GETRANGE', KEYS[1], {PatronActivityStatus.STATE_OFFSET.redis}) == '{PatronActivityStatus.State.LOCKED}' + and redis.call('GETRANGE', KEYS[1], {PatronActivityStatus.TASK_ID_OFFSET.redis}) ~= ARGV[1] + then + return nil + else + redis.call('DEL', KEYS[1]) + return 1 + end + """ + + def __init__( + self, + redis_client: Redis, + collection: Collection | int | None, + patron: Patron | int | None, + task_id: str, + ): + self._redis_client = redis_client + self._patron_id = patron.id if isinstance(patron, Patron) else patron + self._collection_id = ( + collection.id if isinstance(collection, Collection) else collection + ) + self._task_id = task_id + self._update_script = self._redis_client.register_script(self.UPDATE_SCRIPT) + self._clear_script = self._redis_client.register_script(self.CLEAR_SCRIPT) + self._in_context_manager = False + self._context_manager_acquired = False + + @cached_property + def key(self) -> str: + return self._get_key(self._redis_client, self._patron_id, self._collection_id) + + def status(self) -> PatronActivityStatus | None: + """ + Get the current status of the patron activity sync task. + + :return: If the return value is `None` there is no record of the task in + Redis. Otherwise, the return value is a `PatronActivityStatus` object. 
+ """ + status = self._redis_client.get(self.key) + if status is None: + return None + + return PatronActivityStatus.from_redis(status) + + def lock(self) -> bool: + """ + Attempt to acquire the lock for the patron activity sync task. + + The lock can only be acquired if the task currently has no data in + redis. The lock will expire after `LOCKED_TIMEOUT` seconds. + + :return: True if the lock was acquired, False otherwise. + """ + acquired = self._redis_client.set( + self.key, + PatronActivityStatus( + state=PatronActivityStatus.State.LOCKED, task_id=self._task_id + ).to_redis(), + nx=True, + ex=self.LOCKED_TIMEOUT, + ) + return acquired is not None + + def clear(self) -> bool: + """ + Clear the status of the patron activity sync task. + + If the state is not LOCKED, any task can clear the status. If the state is + LOCKED, only the task that acquired the lock can clear the status. + + :return: True if the status was cleared, False otherwise. + """ + return ( + self._clear_script( + keys=[self.key], + args=[self._task_id], + ) + is not None + ) + + def _update(self, status: PatronActivityStatus, timeout: int) -> bool: + value_set = self._update_script( + keys=[self.key], + args=[self._task_id, status.to_redis(), timeout], + ) + return value_set is not None + + def success(self) -> bool: + """ + Mark the patron activity sync task as successful. This can only be done by + the task that acquired the lock. + + :return: True if the status was updated, False otherwise. + """ + return self._update( + PatronActivityStatus( + state=PatronActivityStatus.State.SUCCESS, task_id=self._task_id + ), + self.SUCCESS_TIMEOUT, + ) + + def fail(self) -> bool: + """ + Mark the patron activity sync task as failed. This can only be done by + the task that acquired the lock. + + :return: True if the status was updated, False otherwise. 
+ """ + return self._update( + PatronActivityStatus( + state=PatronActivityStatus.State.FAILED, task_id=self._task_id + ), + self.FAILED_TIMEOUT, + ) + + def not_supported(self) -> bool: + """ + Mark the patron activity sync task as not supported. This can only be done by + the task that acquired the lock. + + :return: True if the status was set, False otherwise. + """ + return self._update( + PatronActivityStatus( + state=PatronActivityStatus.State.NOT_SUPPORTED, task_id=self._task_id + ), + self.NOT_SUPPORTED_TIMEOUT, + ) + + def __enter__(self) -> bool: + if self._in_context_manager: + raise PatronActivityError(f"Cannot nest {self.__class__.__name__}.") + self._in_context_manager = True + acquired = self.lock() + self._context_manager_acquired = acquired + return acquired + + def __exit__( + self, + exctype: type[BaseException] | None, + excinst: BaseException | None, + exctb: TracebackType | None, + ) -> Literal[False]: + if self._context_manager_acquired: + if excinst is not None: + self.log.error( + "An exception occurred during the patron activity sync.", + exc_info=excinst, + ) + self.fail() + else: + self.success() + + self._in_context_manager = False + self._context_manager_acquired = False + return False + + @classmethod + def _get_key( + cls, redis_client: Redis, patron_id: int | None, collection_id: int | None + ) -> str: + return redis_client.get_key( + "PatronActivity", + Patron.redis_key_from_id(patron_id), + Collection.redis_key_from_id(collection_id), + ) + + @classmethod + def collections_ready_for_sync( + cls, redis_client: Redis, patron: Patron + ) -> set[Collection]: + """ + Find the collections for a patron that have no records in redis for + patron activity sync. This indicates that the collection is ready to be + synced. 
+ """ + collections = patron.library.collections + keys = [ + cls._get_key(redis_client, patron.id, collection.id) + for collection in collections + ] + statuses = redis_client.mget(keys) + return { + collection + for collection, status in zip(collections, statuses) + if status is None + } diff --git a/src/palace/manager/service/redis/redis.py b/src/palace/manager/service/redis/redis.py new file mode 100644 index 0000000000..4b5a3919ba --- /dev/null +++ b/src/palace/manager/service/redis/redis.py @@ -0,0 +1,155 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any + +import redis + +from palace.manager.service.redis.exception import RedisValueError +from palace.manager.service.redis.key import RedisKeyGenerator + +# We do this right now because we are using types-redis, which defines Redis as a generic type, +# even though it is not actually generic. Redis 5 now support type hints natively, so we +# should be able to drop types-redis in the future. +# Currently, the built-in type hints are incomplete at best, so types-redis does a better job. +# The types-redis package description on mypy has some more information and is a good place to +# check in the future for updates: https://pypi.org/project/types-redis/. +# This GitHub issue is tracking the progress of the biggest blocker we have for using the +# built-in type hints: https://github.com/redis/redis-py/issues/2399 +if TYPE_CHECKING: + RedisClient = redis.Redis[str] + RedisPipeline = redis.client.Pipeline[str] +else: + RedisClient = redis.Redis + RedisPipeline = redis.client.Pipeline + + +class RedisPrefixCheckMixin(ABC): + """ + A mixin with functions to check that the keys used in a redis command have the expected + prefix. This is useful for ensuring that keys are namespaced correctly in a multi-tenant + environment. + + We use this mixin in our Redis and Pipeline classes. 
+ + Some inspiration for this was taken from Kombu's Redis class. See: + https://github.com/celery/kombu/pull/1349 + """ + + class RedisCommandArgsBase(ABC): + def __init__(self, name: str): + self.name = name + + @abstractmethod + def key_args(self, args: list[Any]) -> Sequence[str]: + """ + Takes a list of arguments and returns a sequence of keys that should be checked for the + correct prefix. + """ + ... + + class RedisCommandArgs(RedisCommandArgsBase): + def __init__(self, name: str, *, args_start: int = 0, args_end: int | None = 1): + super().__init__(name) + self.args_start = args_start + self.args_end = args_end + + def key_args(self, args: list[Any]) -> Sequence[str]: + return [str(arg) for arg in args[self.args_start : self.args_end]] + + class RedisVariableCommandArgs(RedisCommandArgsBase): + def __init__(self, name: str, *, key_index: int = 0): + super().__init__(name) + self.key_index = key_index + + def key_args(self, args: list[Any]) -> Sequence[str]: + keys = int(args[self.key_index]) + args_start = self.key_index + 1 + return [str(arg) for arg in args[args_start : args_start + keys]] + + class RedisCommandNoArgs(RedisCommandArgsBase): + def key_args(self, args: list[Any]) -> Sequence[str]: + return [] + + _PREFIXED_COMMANDS = { + cmd.name: cmd + for cmd in [ + RedisCommandNoArgs("SCRIPT LOAD"), + RedisCommandArgs("KEYS"), + RedisCommandArgs("GET"), + RedisCommandArgs("GETRANGE"), + RedisCommandArgs("SET"), + RedisCommandArgs("TTL"), + RedisCommandArgs("DEL", args_end=None), + RedisCommandArgs("MGET", args_end=None), + RedisCommandArgs("EXPIRETIME"), + RedisVariableCommandArgs("EVALSHA", key_index=1), + ] + } + + def _check_prefix(self, *args: Any) -> None: + arg_list = list(args) + command = arg_list.pop(0).upper() + cmd_args = self._PREFIXED_COMMANDS.get(command) + if cmd_args is None: + raise RedisValueError( + f"Command {command} is not checked for prefix. 
Args: {arg_list}" + ) + + for key in cmd_args.key_args(arg_list): + if not key.startswith(self._prefix): + raise RedisValueError( + f"Key {key} does not start with prefix {self._prefix}. Command {command} args: {arg_list}" + ) + + @property + @abstractmethod + def _prefix(self) -> str: + ... + + +class Redis(RedisClient, RedisPrefixCheckMixin): + """ + A subclass of redis.Redis that adds the ability to check that keys are prefixed correctly. + """ + + def __init__(self, *args: Any, key_generator: RedisKeyGenerator, **kwargs: Any): + super().__init__(*args, **kwargs) + self.get_key: RedisKeyGenerator = key_generator + self.auto_close_connection_pool = True + + @property + def _prefix(self) -> str: + return self.get_key() + + def execute_command(self, *args: Any, **options: Any) -> Any: + self._check_prefix(*args) + return super().execute_command(*args, **options) + + def pipeline(self, transaction: bool = True, shard_hint: Any = None) -> Pipeline: + return Pipeline( + self.connection_pool, + self.response_callbacks, + transaction, + shard_hint, + key_generator=self.get_key, + ) + + +class Pipeline(RedisPipeline, RedisPrefixCheckMixin): + """ + A subclass of redis.client.Pipeline that adds the ability to check that keys are prefixed correctly. 
+ """ + + def __init__(self, *args: Any, key_generator: RedisKeyGenerator, **kwargs: Any): + super().__init__(*args, **kwargs) + self.get_key: RedisKeyGenerator = key_generator + + @property + def _prefix(self) -> str: + return self.get_key() + + def execute_command(self, *args: Any, **options: Any) -> Any: + self._check_prefix(*args) + return super().execute_command(*args, **options) diff --git a/src/palace/manager/sqlalchemy/model/collection.py b/src/palace/manager/sqlalchemy/model/collection.py index 3504e70b8e..fa42d33fe9 100644 --- a/src/palace/manager/sqlalchemy/model/collection.py +++ b/src/palace/manager/sqlalchemy/model/collection.py @@ -21,6 +21,7 @@ from palace.manager.core.exceptions import BasePalaceException from palace.manager.integration.goals import Goals +from palace.manager.service.redis.key import RedisKeyMixin from palace.manager.sqlalchemy.constants import DataSourceConstants, EditionConstants from palace.manager.sqlalchemy.hassessioncache import HasSessionCache from palace.manager.sqlalchemy.hybrid import hybrid_property @@ -45,7 +46,7 @@ T = TypeVar("T") -class Collection(Base, HasSessionCache): +class Collection(Base, HasSessionCache, RedisKeyMixin): """A Collection is a set of LicensePools obtained through some mechanism.""" diff --git a/src/palace/manager/sqlalchemy/model/licensing.py b/src/palace/manager/sqlalchemy/model/licensing.py index 6d31df74d3..006cc2c755 100644 --- a/src/palace/manager/sqlalchemy/model/licensing.py +++ b/src/palace/manager/sqlalchemy/model/licensing.py @@ -1041,12 +1041,6 @@ def loan_to( create_method_kwargs=kwargs, ) - if is_new: - # This action creates uncertainty about what the patron's - # loan activity actually is. We'll need to sync with the - # vendor APIs. 
- patron.last_loan_activity_sync = None - if fulfillment: loan.fulfillment = fulfillment if external_identifier: @@ -1066,11 +1060,6 @@ def on_hold_to( raise PolicyException("Holds are disabled for this library.") start = start or utc_now() hold, new = get_one_or_create(_db, Hold, patron=patron, license_pool=self) - # This action creates uncertainty about what the patron's - # loan activity actually is. We'll need to sync with the - # vendor APIs. - if new: - patron.last_loan_activity_sync = None hold.update(start, end, position) if external_identifier: hold.external_identifier = external_identifier diff --git a/src/palace/manager/sqlalchemy/model/patron.py b/src/palace/manager/sqlalchemy/model/patron.py index fcfee128c1..fa3734ffd1 100644 --- a/src/palace/manager/sqlalchemy/model/patron.py +++ b/src/palace/manager/sqlalchemy/model/patron.py @@ -24,6 +24,7 @@ from palace.manager.core.classifier import Classifier from palace.manager.core.user_profile import ProfileStorage +from palace.manager.service.redis.key import RedisKeyMixin from palace.manager.sqlalchemy.constants import LinkRelations from palace.manager.sqlalchemy.hybrid import hybrid_property from palace.manager.sqlalchemy.model.base import Base @@ -63,7 +64,7 @@ def library(self): return None -class Patron(Base): +class Patron(Base, RedisKeyMixin): __tablename__ = "patrons" id = Column(Integer, primary_key=True) @@ -103,10 +104,9 @@ class Patron(Base): # system such as an ILS. last_external_sync = Column(DateTime(timezone=True)) - # The last time this record was synced with the corresponding - # records managed by the vendors who provide the library with - # ebooks. - _last_loan_activity_sync = Column( + # TODO: This field is no longer used. Its left here for backwards compatibility until + # the release with this change is deployed. It should be removed after that. 
+ _REMOVED_last_loan_activity_sync = Column( "last_loan_activity_sync", DateTime(timezone=True), default=None ) @@ -253,46 +253,6 @@ def works_on_loan_or_on_hold(self): loans = self.works_on_loan() return set(holds + loans) - @property - def loan_activity_max_age(self): - """In the absence of any other information, how long should loan - activity be considered 'fresh' for this patron? - - We reset Patron.last_loan_activity_sync immediately if we hear - about a change to a patron's loans or holds. This handles - cases where patron activity happens where we can't see it, - e.g. on a vendor website or mobile app. - - TODO: This is currently a constant, but in the future it could become - a per-library setting. - """ - return 15 * 60 - - def is_last_loan_activity_stale(self) -> bool: - """Has the last_loan_activity_sync timestamp outlived the loan_activity_max_age seconds""" - if not self._last_loan_activity_sync: - return True - - return utc_now() > self._last_loan_activity_sync + datetime.timedelta( - seconds=self.loan_activity_max_age - ) - - @property - def last_loan_activity_sync(self): - """When was the last time we asked the vendors about - this patron's loan activity? - - :return: A datetime, or None if we know our loan data is - stale. 
- """ - if self.is_last_loan_activity_stale(): - return None - return self._last_loan_activity_sync - - @last_loan_activity_sync.setter - def last_loan_activity_sync(self, value): - self._last_loan_activity_sync = value - @hybrid_property def synchronize_annotations(self): return self._synchronize_annotations diff --git a/src/palace/manager/util/datetime_helpers.py b/src/palace/manager/util/datetime_helpers.py index 9212bc7333..9603ead7a8 100644 --- a/src/palace/manager/util/datetime_helpers.py +++ b/src/palace/manager/util/datetime_helpers.py @@ -1,5 +1,7 @@ import datetime -from typing import overload +from collections.abc import Callable +from functools import wraps +from typing import ParamSpec, TypeVar, overload import pytz from dateutil.relativedelta import relativedelta @@ -11,15 +13,26 @@ # https://docs.python.org/3/library/datetime.html#aware-and-naive-objects -def datetime_utc(*args, **kwargs) -> datetime.datetime: - """Return a datetime object but with UTC information from pytz. - :return: datetime object - """ - kwargs["tzinfo"] = pytz.UTC - return datetime.datetime(*args, **kwargs) +P = ParamSpec("P") +T = TypeVar("T") + + +def _wrapper(func: Callable[P, T]) -> Callable[P, T]: + @wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: + kwargs["tzinfo"] = pytz.UTC + return func(*args, **kwargs) + + return wrapper + + +datetime_utc = _wrapper(datetime.datetime) +""" +Return a datetime object but with UTC information from pytz. +""" -def from_timestamp(ts) -> datetime.datetime: +def from_timestamp(ts: float) -> datetime.datetime: """Return a UTC datetime object from a timestamp. 
:return: datetime object diff --git a/tests/conftest.py b/tests/conftest.py index e09335a58b..93e3085d3e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -16,6 +16,7 @@ "tests.fixtures.flask", "tests.fixtures.library", "tests.fixtures.odl", + "tests.fixtures.redis", "tests.fixtures.s3", "tests.fixtures.search", "tests.fixtures.services", diff --git a/tests/fixtures/database.py b/tests/fixtures/database.py index fe721cec4e..b6b8b509f2 100644 --- a/tests/fixtures/database.py +++ b/tests/fixtures/database.py @@ -528,6 +528,7 @@ def collection( password=None, data_source_name=None, settings: dict[str, Any] | None = None, + library: Library | None = None, ) -> Collection: name = name or self.fresh_str() collection, _ = Collection.by_name_and_protocol(self.session, name, protocol) @@ -544,6 +545,8 @@ def collection( if data_source_name: collection.data_source = data_source_name + if library: + collection.libraries.append(library) return collection def work( diff --git a/tests/fixtures/redis.py b/tests/fixtures/redis.py new file mode 100644 index 0000000000..68122ead74 --- /dev/null +++ b/tests/fixtures/redis.py @@ -0,0 +1,55 @@ +from collections.abc import Generator +from contextlib import contextmanager + +import pytest +from pydantic import RedisDsn +from typing_extensions import Self + +from palace.manager.service.redis.redis import Redis +from tests.fixtures.config import FixtureTestUrlConfiguration +from tests.fixtures.database import TestIdFixture +from tests.fixtures.services import ServicesFixture + + +class RedisTestConfiguration(FixtureTestUrlConfiguration): + url: RedisDsn + + class Config: + env_prefix = "PALACE_TEST_REDIS_" + + +class RedisFixture: + def __init__(self, test_id: TestIdFixture, services_fixture: ServicesFixture): + self.test_id = test_id + self.services_fixture = services_fixture + self.config = RedisTestConfiguration.from_env() + + self.key_prefix = f"test::{self.test_id.id}" + self.services_fixture.services.config.from_dict( + 
{"redis": {"url": self.config.url, "key_prefix": self.key_prefix}} + ) + self.client: Redis = self.services_fixture.services.redis.client() + + def close(self): + keys = self.client.keys(f"{self.key_prefix}*") + if keys: + self.client.delete(*keys) + + @classmethod + @contextmanager + def fixture( + cls, test_id: TestIdFixture, services_fixture: ServicesFixture + ) -> Generator[Self, None, None]: + fixture = cls(test_id, services_fixture) + try: + yield fixture + finally: + fixture.close() + + +@pytest.fixture(scope="function") +def redis_fixture( + function_test_id: TestIdFixture, services_fixture: ServicesFixture +) -> Generator[RedisFixture, None, None]: + with RedisFixture.fixture(function_test_id, services_fixture) as fixture: + yield fixture diff --git a/tests/manager/api/controller/test_base.py b/tests/manager/api/controller/test_base.py index 35f9e89d85..592c725cea 100644 --- a/tests/manager/api/controller/test_base.py +++ b/tests/manager/api/controller/test_base.py @@ -1,6 +1,4 @@ import datetime -import email -import random from unittest.mock import MagicMock, patch import flask @@ -25,7 +23,7 @@ from palace.manager.sqlalchemy.model.patron import Patron from palace.manager.sqlalchemy.model.resource import Representation from palace.manager.sqlalchemy.util import create, tuple_to_numericrange -from palace.manager.util.datetime_helpers import datetime_utc, utc_now +from palace.manager.util.datetime_helpers import utc_now from palace.manager.util.problem_detail import ProblemDetail from tests.fixtures.api_controller import CirculationControllerFixture from tests.fixtures.library import LibraryFixture @@ -194,81 +192,6 @@ def test_authentication_sends_proper_headers( response = circulation_fixture.controller.authenticate() assert None == response.headers.get("WWW-Authenticate") - def test_handle_conditional_request( - self, circulation_fixture: CirculationControllerFixture - ): - # First, test success: the client provides If-Modified-Since - # and it is _not_ 
earlier than the 'last modified' date known by - # the server. - - now_datetime = utc_now() - now_string = email.utils.format_datetime(now_datetime) - - # To make the test more realistic, set a meaningless - # microseconds value of 'now'. - now_datetime = now_datetime.replace(microsecond=random.randint(0, 999999)) - - with circulation_fixture.app.test_request_context( - headers={"If-Modified-Since": now_string} - ): - response = circulation_fixture.controller.handle_conditional_request( - now_datetime - ) - assert response is not None - assert 304 == response.status_code - - # Try with a few specific values that comply to a greater or lesser - # extent with the date-format spec. - very_old = datetime_utc(2000, 1, 1) - for value in [ - "Thu, 01 Aug 2019 10:00:40 -0000", - "Thu, 01 Aug 2019 10:00:40", - "01 Aug 2019 10:00:40", - ]: - with circulation_fixture.app.test_request_context( - headers={"If-Modified-Since": value} - ): - response = circulation_fixture.controller.handle_conditional_request( - very_old - ) - assert response is not None - assert 304 == response.status_code - - # All remaining test cases are failures: for whatever reason, - # the request is not a valid conditional request and the - # method returns None. - - with circulation_fixture.app.test_request_context( - headers={"If-Modified-Since": now_string} - ): - # This request _would_ be a conditional request, but the - # precondition fails: If-Modified-Since is earlier than - # the 'last modified' date known by the server. - newer = now_datetime + datetime.timedelta(seconds=10) - response = circulation_fixture.controller.handle_conditional_request(newer) - assert None == response - - # Here, the server doesn't know what the 'last modified' date is, - # so it can't evaluate the precondition. - response = circulation_fixture.controller.handle_conditional_request(None) - assert None == response - - # Here, the precondition string is not parseable as a datetime. 
- with circulation_fixture.app.test_request_context( - headers={"If-Modified-Since": "01 Aug 2019"} - ): - response = circulation_fixture.controller.handle_conditional_request( - very_old - ) - assert None == response - - # Here, the client doesn't provide a precondition at all. - with circulation_fixture.app.test_request_context(): - response = circulation_fixture.controller.handle_conditional_request( - very_old - ) - assert None == response - def test_load_licensepools(self, circulation_fixture: CirculationControllerFixture): # Here's a Library that has two Collections. library = circulation_fixture.library diff --git a/tests/manager/api/controller/test_loan.py b/tests/manager/api/controller/test_loan.py index 8a593b13d4..57db5b8d14 100644 --- a/tests/manager/api/controller/test_loan.py +++ b/tests/manager/api/controller/test_loan.py @@ -1,8 +1,8 @@ import datetime -import urllib.parse from collections.abc import Generator from decimal import Decimal -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock, create_autospec, patch +from urllib.parse import quote import feedparser import pytest @@ -43,6 +43,7 @@ from palace.manager.core.opds_import import OPDSAPI from palace.manager.core.problem_details import INTEGRATION_ERROR, INVALID_INPUT from palace.manager.feed.serializer.opds2 import OPDS2Serializer +from palace.manager.service.redis.models.patron_activity import PatronActivity from palace.manager.sqlalchemy.constants import MediaTypes from palace.manager.sqlalchemy.model.collection import Collection from palace.manager.sqlalchemy.model.datasource import DataSource @@ -57,7 +58,7 @@ from palace.manager.sqlalchemy.model.resource import Representation from palace.manager.sqlalchemy.model.work import Work from palace.manager.sqlalchemy.util import get_one, get_one_or_create -from palace.manager.util.datetime_helpers import datetime_utc, utc_now +from palace.manager.util.datetime_helpers import utc_now from palace.manager.util.flask_util 
import OPDSEntryResponse, Response from palace.manager.util.http import RemoteIntegrationException from palace.manager.util.opds_writer import AtomFeed, OPDSFeed @@ -65,7 +66,9 @@ from tests.fixtures.api_controller import CirculationControllerFixture from tests.fixtures.database import DatabaseTransactionFixture from tests.fixtures.library import LibraryFixture +from tests.fixtures.redis import RedisFixture from tests.fixtures.services import ServicesFixture +from tests.mocks.circulation import MockPatronActivityCirculationAPI from tests.mocks.mock import DummyHTTPClient @@ -101,6 +104,12 @@ def __init__( self.identifier_identifier = self.identifier.identifier self.identifier_type = self.identifier.type + # Make sure our collection has a PatronActivityCirculationAPI setup, so we can test the + # patron activity sync tasks. + self.manager.d_circulation.add_remote_api( + self.pool, MockPatronActivityCirculationAPI(db.session, self.collection) + ) + @pytest.fixture(scope="function") def loan_fixture( @@ -277,10 +286,13 @@ def test_borrow_success( # Create a new loan. with loan_fixture.request_context_with_library("/", headers=headers): - loan_fixture.manager.loans.authenticated_patron_from_request() - borrow_response = loan_fixture.manager.loans.borrow( - loan_fixture.identifier_type, loan_fixture.identifier_identifier - ) + patron = loan_fixture.manager.loans.authenticated_patron_from_request() + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + borrow_response = loan_fixture.manager.loans.borrow( + loan_fixture.identifier_type, loan_fixture.identifier_identifier + ) loan = get_one( loan_fixture.db.session, Loan, license_pool=loan_fixture.pool ) @@ -289,6 +301,18 @@ def test_borrow_success( assert isinstance(borrow_response, FlaskResponse) assert 201 == borrow_response.status_code + # And queue up a task to sync the patron's activity. 
+ assert isinstance(patron, Patron) + sync_task.apply_async.assert_called_once_with( + ( + loan_fixture.pool.collection.id, + patron.id, + loan_fixture.valid_credentials["password"], + ), + {"force": True}, + countdown=5, + ) + # A loan has been created for this license pool. assert loan is not None # The loan has yet to be fulfilled. @@ -301,9 +325,12 @@ def test_borrow_success( # Borrow again with an existing loan. with loan_fixture.request_context_with_library("/", headers=headers): loan_fixture.manager.loans.authenticated_patron_from_request() - borrow_response = loan_fixture.manager.loans.borrow( - loan_fixture.identifier_type, loan_fixture.identifier_identifier - ) + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + borrow_response = loan_fixture.manager.loans.borrow( + loan_fixture.identifier_type, loan_fixture.identifier_identifier + ) # A loan has been created for this license pool. loan = get_one( @@ -313,6 +340,9 @@ def test_borrow_success( assert isinstance(borrow_response, OPDSEntryResponse) assert 200 == borrow_response.status_code + # Because the loan was existing, we didn't queue up a task to sync the patron's activity. + sync_task.apply_async.assert_not_called() + # There is still a loan that has not yet been fulfilled. assert loan is not None assert loan.fulfillment is None @@ -481,11 +511,16 @@ def test_borrow_and_fulfill_with_streaming_delivery_mechanism( utc_now() + datetime.timedelta(seconds=3600), ), ) - borrow_response = loan_fixture.manager.loans.borrow( - identifier.type, identifier.identifier - ) + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + borrow_response = loan_fixture.manager.loans.borrow( + identifier.type, identifier.identifier + ) assert isinstance(borrow_response, Response) + sync_task.apply_async.assert_called_once() + # A loan has been created for this license pool. 
loan = get_one(loan_fixture.db.session, Loan, license_pool=pool) assert loan is not None @@ -1105,7 +1140,7 @@ def test_revoke_loan( {"Authorization": loan_fixture.valid_auth} ) - # Create a new loan. + # Create a loan and revoke it with loan_fixture.request_context_with_library("/", headers=headers): patron = loan_fixture.manager.loans.authenticated_patron_from_request() assert isinstance(patron, Patron) @@ -1113,10 +1148,66 @@ def test_revoke_loan( loan_fixture.manager.d_circulation.queue_checkin(loan_fixture.pool) - response = loan_fixture.manager.loans.revoke(loan_fixture.pool_id) + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + response = loan_fixture.manager.loans.revoke(loan_fixture.pool_id) + + assert 200 == response.status_code + serialization_helper.verify_and_get_single_entry_feed_links(response) + + # We queued up a sync_patron_activity_collection task + sync_task.apply_async.assert_called_once_with( + ( + loan_fixture.pool.collection.id, + patron.id, + loan_fixture.valid_credentials["password"], + ), + {"force": True}, + countdown=5, + ) - assert 200 == response.status_code - _ = serialization_helper.verify_and_get_single_entry_feed_links(response) + @pytest.mark.parametrize( + *OPDSSerializationTestHelper.PARAMETRIZED_SINGLE_ENTRY_ACCEPT_HEADERS + ) + def test_revoke_loan_no_patron_activity_support( + self, + loan_fixture: LoanFixture, + accept_header: str | None, + expected_content_type: str, + ): + serialization_helper = OPDSSerializationTestHelper( + accept_header, expected_content_type + ) + headers = serialization_helper.merge_accept_header( + {"Authorization": loan_fixture.valid_auth} + ) + + # Create a loan and revoke it + with loan_fixture.request_context_with_library("/", headers=headers): + patron = loan_fixture.manager.loans.authenticated_patron_from_request() + assert isinstance(patron, Patron) + loan, newly_created = loan_fixture.pool.loan_to(patron) + + mock_supports_patron_activity = 
create_autospec( + loan_fixture.manager.d_circulation.supports_patron_activity, + return_value=False, + ) + loan_fixture.manager.d_circulation.supports_patron_activity = ( + mock_supports_patron_activity + ) + loan_fixture.manager.d_circulation.queue_checkin(loan_fixture.pool) + + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + response = loan_fixture.manager.loans.revoke(loan_fixture.pool_id) + + assert 200 == response.status_code + serialization_helper.verify_and_get_single_entry_feed_links(response) + + mock_supports_patron_activity.assert_called_once_with(loan_fixture.pool) + sync_task.apply_async.assert_not_called() def test_revoke_loan_exception( self, @@ -1132,11 +1223,17 @@ def test_revoke_loan_exception( patron = loan_fixture.manager.loans.authenticated_patron_from_request() assert isinstance(patron, Patron) loan_fixture.pool.loan_to(patron) - response = loan_fixture.manager.loans.revoke(loan_fixture.pool_id) + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + response = loan_fixture.manager.loans.revoke(loan_fixture.pool_id) assert isinstance(response, ProblemDetail) assert response == COULD_NOT_MIRROR_TO_REMOTE + # Because of the error we did not queue up a sync_patron_activity_collection task + sync_task.apply_async.assert_not_called() + @pytest.mark.parametrize( *OPDSSerializationTestHelper.PARAMETRIZED_SINGLE_ENTRY_ACCEPT_HEADERS ) @@ -1160,10 +1257,24 @@ def test_revoke_hold( loan_fixture.manager.d_circulation.queue_release_hold(loan_fixture.pool) - response = loan_fixture.manager.loans.revoke(loan_fixture.pool_id) - - assert 200 == response.status_code - _ = serialization_helper.verify_and_get_single_entry_feed_links(response) + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + response = loan_fixture.manager.loans.revoke(loan_fixture.pool_id) + + assert 200 == response.status_code + _ = 
serialization_helper.verify_and_get_single_entry_feed_links(response) + + # We queued up a sync_patron_activity_collection task + sync_task.apply_async.assert_called_once_with( + ( + loan_fixture.pool.collection.id, + patron.id, + loan_fixture.valid_credentials["password"], + ), + {"force": True}, + countdown=5, + ) def test_revoke_hold_exception( self, @@ -1179,11 +1290,17 @@ def test_revoke_hold_exception( patron = loan_fixture.manager.loans.authenticated_patron_from_request() assert isinstance(patron, Patron) loan_fixture.pool.on_hold_to(patron, position=0) - response = loan_fixture.manager.loans.revoke(loan_fixture.pool_id) + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + response = loan_fixture.manager.loans.revoke(loan_fixture.pool_id) assert isinstance(response, ProblemDetail) assert response == CANNOT_RELEASE_HOLD + # Because of the error we did not queue up a sync_patron_activity_collection task + sync_task.apply_async.assert_not_called() + def test_revoke_hold_nonexistent_licensepool(self, loan_fixture: LoanFixture): with loan_fixture.request_context_with_library( "/", headers=dict(Authorization=loan_fixture.valid_auth) @@ -1310,200 +1427,137 @@ def test_3m_cant_revoke_hold_if_reserved(self, loan_fixture: LoanFixture): == response.detail ) - def test_active_loans(self, loan_fixture: LoanFixture): - # First, verify that this controller supports conditional HTTP - # GET by calling handle_conditional_request and propagating - # any Response it returns. - response_304 = Response(status=304) - - def handle_conditional_request(last_modified=None): - return response_304 - - original_handle_conditional_request = ( - loan_fixture.controller.handle_conditional_request - ) - loan_fixture.manager.loans.handle_conditional_request = ( - handle_conditional_request - ) - - # Before making any requests, set the patron's last_loan_activity_sync - # to a known value. 
- patron = None - with loan_fixture.request_context_with_library("/"): - patron = loan_fixture.controller.authenticated_patron( - loan_fixture.valid_credentials - ) - now = utc_now() - patron.last_loan_activity_sync = now - - # Make a request -- it doesn't have If-Modified-Since, but our - # mocked handle_conditional_request will treat it as a - # successful conditional request. - with loan_fixture.request_context_with_library( - "/", headers=dict(Authorization=loan_fixture.valid_auth) - ): - patron = loan_fixture.manager.loans.authenticated_patron_from_request() - response = loan_fixture.manager.loans.sync() - assert response is response_304 - - # Since the conditional request succeeded, we did not call out - # to the vendor APIs, and patron.last_loan_activity_sync was - # not updated. - assert now == patron.last_loan_activity_sync - - # Leaving patron.last_loan_activity_sync alone will stop the - # circulation manager from calling out to the external APIs, - # since it was set to a recent time. We test this explicitly - # later, but for now, clear it out. - patron.last_loan_activity_sync = None - - # Un-mock handle_conditional_request. It will be called over - # the course of this test, but it will not notice any more - # conditional requests -- the detailed behavior of - # handle_conditional_request is tested elsewhere. - loan_fixture.manager.loans.handle_conditional_request = ( - original_handle_conditional_request - ) - - # If the request is not conditional, an OPDS feed is returned. - # This feed is empty because the patron has no loans. + def test_active_loans( + self, + db: DatabaseTransactionFixture, + loan_fixture: LoanFixture, + redis_fixture: RedisFixture, + ): + # An OPDS feed is returned. This feed is empty because the patron has no loans. 
with loan_fixture.request_context_with_library( "/", headers=dict(Authorization=loan_fixture.valid_auth) ): patron = loan_fixture.manager.loans.authenticated_patron_from_request() - response = loan_fixture.manager.loans.sync() - assert not "" in response.get_data(as_text=True) - assert response.headers["Cache-Control"].startswith("private,") - - # patron.last_loan_activity_sync was set to the moment the - # LoanController started calling out to the remote APIs. - new_sync_time = patron.last_loan_activity_sync - assert new_sync_time > now - - # Set up a bunch of loans on the remote APIs. - overdrive_edition, overdrive_pool = loan_fixture.db.edition( + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + response = loan_fixture.manager.loans.sync() + assert not "" in response.get_data(as_text=True) + assert response.headers["Cache-Control"].startswith("private,") + + # We queued up a sync_patron_activity task to go sync the patrons information + assert isinstance(patron, Patron) + assert sync_task.apply_async.call_count == len(patron.library.collections) + for library in patron.library.collections: + sync_task.apply_async.assert_any_call( + (library.id, patron.id, loan_fixture.valid_credentials["password"]), + ) + + # Set up some loans and holds + overdrive_edition, overdrive_pool = db.edition( with_open_access_download=False, data_source_name=DataSource.OVERDRIVE, identifier_type=Identifier.OVERDRIVE_ID, with_license_pool=True, ) - overdrive_book = loan_fixture.db.work( + overdrive_book = db.work( presentation_edition=overdrive_edition, ) overdrive_pool.open_access = False + now = utc_now() + overdrive_pool.loan_to(patron, now, now + datetime.timedelta(seconds=3600)) - bibliotheca_edition, bibliotheca_pool = loan_fixture.db.edition( + bibliotheca_edition, bibliotheca_pool = db.edition( with_open_access_download=False, data_source_name=DataSource.BIBLIOTHECA, identifier_type=Identifier.BIBLIOTHECA_ID, with_license_pool=True, ) - 
bibliotheca_book = loan_fixture.db.work( + bibliotheca_book = db.work( presentation_edition=bibliotheca_edition, ) bibliotheca_pool.licenses_available = 0 bibliotheca_pool.open_access = False - - loan_fixture.manager.d_circulation.add_remote_loan( - LoanInfo( - overdrive_pool.collection, - overdrive_pool.data_source, - overdrive_pool.identifier.type, - overdrive_pool.identifier.identifier, - utc_now(), - utc_now() + datetime.timedelta(seconds=3600), - ) - ) - loan_fixture.manager.d_circulation.add_remote_hold( - HoldInfo( - bibliotheca_pool.collection, - bibliotheca_pool.data_source, - bibliotheca_pool.identifier.type, - bibliotheca_pool.identifier.identifier, - utc_now(), - utc_now() + datetime.timedelta(seconds=3600), - 0, - ) + bibliotheca_pool.on_hold_to( + patron, now, now + datetime.timedelta(seconds=3600), 0 ) - # Making a new request so soon after the last one means the - # circulation manager won't actually call out to the vendor - # APIs. The resulting feed won't reflect what we know to be - # the reality. - with loan_fixture.request_context_with_library( - "/", headers=dict(Authorization=loan_fixture.valid_auth) - ): - patron = loan_fixture.manager.loans.authenticated_patron_from_request() - response = loan_fixture.manager.loans.sync() - assert "" not in response.get_data(as_text=True) - - # patron.last_loan_activity_sync was not changed as the result - # of this request, since we didn't go to the vendor APIs. - assert patron.last_loan_activity_sync == new_sync_time - - # Change it now, to a timestamp far in the past. 
- long_ago = datetime_utc(2000, 1, 1) - patron.last_loan_activity_sync = long_ago + # Add a collection, that doesn't need to be synced + collection_already_synced = db.collection(library=patron.library) + patron_activity = PatronActivity( + redis_client=redis_fixture.client, + collection=collection_already_synced, + patron=patron, + task_id="test", + ) + patron_activity.lock() + patron_activity.success() - # This ensures that when we request the loans feed again, the - # LoanController actually goes out to the vendor APIs for new - # information. + # The loans are returned in the feed. with loan_fixture.request_context_with_library( "/", headers=dict(Authorization=loan_fixture.valid_auth) ): - patron = loan_fixture.manager.loans.authenticated_patron_from_request() - response = loan_fixture.manager.loans.sync() - - # This time, the feed contains entries. - feed = feedparser.parse(response.data) - entries = feed["entries"] - - overdrive_entry = [ - entry for entry in entries if entry["title"] == overdrive_book.title - ][0] - bibliotheca_entry = [ - entry for entry in entries if entry["title"] == bibliotheca_book.title - ][0] - - assert overdrive_entry["opds_availability"]["status"] == "available" - assert bibliotheca_entry["opds_availability"]["status"] == "ready" - - overdrive_links = overdrive_entry["links"] - fulfill_link = [ - x - for x in overdrive_links - if x["rel"] == "http://opds-spec.org/acquisition" - ][0]["href"] - revoke_link = [ - x for x in overdrive_links if x["rel"] == OPDSFeed.REVOKE_LOAN_REL - ][0]["href"] - bibliotheca_links = bibliotheca_entry["links"] - borrow_link = [ - x - for x in bibliotheca_links - if x["rel"] == "http://opds-spec.org/acquisition/borrow" - ][0]["href"] - bibliotheca_revoke_links = [ - x for x in bibliotheca_links if x["rel"] == OPDSFeed.REVOKE_LOAN_REL - ] - - assert urllib.parse.quote("%s/fulfill" % overdrive_pool.id) in fulfill_link - assert urllib.parse.quote("%s/revoke" % overdrive_pool.id) in revoke_link - assert ( - 
urllib.parse.quote( - "%s/%s/borrow" - % ( - bibliotheca_pool.identifier.type, - bibliotheca_pool.identifier.identifier, - ) + loan_fixture.manager.loans.authenticated_patron_from_request() + with patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task: + response = loan_fixture.manager.loans.sync() + + # This time, the feed contains entries. + feed = feedparser.parse(response.data) + entries = feed["entries"] + + overdrive_entry = [ + entry for entry in entries if entry["title"] == overdrive_book.title + ][0] + bibliotheca_entry = [ + entry for entry in entries if entry["title"] == bibliotheca_book.title + ][0] + + assert overdrive_entry["opds_availability"]["status"] == "available" + assert bibliotheca_entry["opds_availability"]["status"] == "ready" + + overdrive_links = overdrive_entry["links"] + fulfill_link = [ + x for x in overdrive_links if x["rel"] == "http://opds-spec.org/acquisition" + ][0]["href"] + revoke_link = [ + x for x in overdrive_links if x["rel"] == OPDSFeed.REVOKE_LOAN_REL + ][0]["href"] + bibliotheca_links = bibliotheca_entry["links"] + borrow_link = [ + x + for x in bibliotheca_links + if x["rel"] == "http://opds-spec.org/acquisition/borrow" + ][0]["href"] + bibliotheca_revoke_links = [ + x for x in bibliotheca_links if x["rel"] == OPDSFeed.REVOKE_LOAN_REL + ] + + assert quote("%s/fulfill" % overdrive_pool.id) in fulfill_link + assert quote("%s/revoke" % overdrive_pool.id) in revoke_link + assert ( + quote( + "%s/%s/borrow" + % ( + bibliotheca_pool.identifier.type, + bibliotheca_pool.identifier.identifier, ) - in borrow_link ) - assert 0 == len(bibliotheca_revoke_links) - - # Since we went out the the vendor APIs, - # patron.last_loan_activity_sync was updated. 
- assert patron.last_loan_activity_sync > new_sync_time + in borrow_link + ) + assert 0 == len(bibliotheca_revoke_links) + + # We queued up a sync_patron_activity task to go sync the patrons information, + # but only for the collections that needed to be synced + assert sync_task.apply_async.call_count == 1 + sync_task.apply_async.assert_any_call( + ( + loan_fixture.collection.id, + patron.id, + loan_fixture.valid_credentials["password"], + ), + ) @pytest.mark.parametrize( "refresh,expected_sync_call_count", @@ -1521,6 +1575,7 @@ def handle_conditional_request(last_modified=None): def test_loans_refresh( self, loan_fixture: LoanFixture, + redis_fixture: RedisFixture, refresh: str | None, expected_sync_call_count: int, ): @@ -1529,11 +1584,13 @@ def test_loans_refresh( loan_fixture.request_context_with_library( url, headers=dict(Authorization=loan_fixture.valid_auth) ), - patch.object(loan_fixture.manager.d_circulation, "sync_bookshelf") as sync, + patch( + "palace.manager.api.controller.loan.sync_patron_activity" + ) as sync_task, ): loan_fixture.manager.loans.authenticated_patron_from_request() loan_fixture.manager.loans.sync() - assert sync.call_count == expected_sync_call_count + assert sync_task.apply_async.call_count == expected_sync_call_count @pytest.mark.parametrize( "target_loan_duration, " @@ -1711,9 +1768,10 @@ def create_work_and_return_license_pool_and_loan_info(**kwargs): loan_fixture.manager.d_circulation.queue_checkout(license_pool, loan_info) - response = loan_fixture.manager.loans.borrow( - license_pool.identifier.type, license_pool.identifier.identifier - ) + with patch("palace.manager.api.controller.loan.sync_patron_activity"): + response = loan_fixture.manager.loans.borrow( + license_pool.identifier.type, license_pool.identifier.identifier + ) loan = get_one(loan_fixture.db.session, Loan, license_pool=license_pool) assert loan is not None diff --git a/tests/manager/api/test_bibliotheca.py b/tests/manager/api/test_bibliotheca.py index 
c208d1a478..b38965ea89 100644 --- a/tests/manager/api/test_bibliotheca.py +++ b/tests/manager/api/test_bibliotheca.py @@ -6,7 +6,7 @@ from io import BytesIO, StringIO from typing import TYPE_CHECKING, cast from unittest import mock -from unittest.mock import MagicMock, create_autospec, patch +from unittest.mock import MagicMock, create_autospec import pytest from pymarc import parse_xml_to_array @@ -30,7 +30,6 @@ FulfillmentInfo, HoldInfo, LoanInfo, - PatronActivityThread, ) from palace.manager.api.circulation_exceptions import ( AlreadyCheckedOut, @@ -454,7 +453,7 @@ def test_marc_request(self, bibliotheca_fixture: BibliothecaAPITestFixture): with pytest.raises(RemoteInitiatedServerError) as excinfo: [x for x in bibliotheca_fixture.api.marc_request(start, end, 10, 20)] - def test_sync_bookshelf(self, bibliotheca_fixture: BibliothecaAPITestFixture): + def test_sync_patron_activity(self, bibliotheca_fixture: BibliothecaAPITestFixture): db = bibliotheca_fixture.db patron = db.patron() circulation = CirculationAPI( @@ -467,10 +466,7 @@ def test_sync_bookshelf(self, bibliotheca_fixture: BibliothecaAPITestFixture): 200, content=bibliotheca_fixture.files.sample_data("checkouts.xml") ) - with patch.object( - PatronActivityThread, "api", return_value=bibliotheca_fixture.api - ): - circulation.sync_bookshelf(patron, "dummy pin") + bibliotheca_fixture.api.sync_patron_activity(patron, "dummy pin") # The patron should have two loans and two holds. 
l1, l2 = patron.loans diff --git a/tests/manager/api/test_circulationapi.py b/tests/manager/api/test_circulationapi.py index ec898ee2a2..9510b9025d 100644 --- a/tests/manager/api/test_circulationapi.py +++ b/tests/manager/api/test_circulationapi.py @@ -2,7 +2,7 @@ import datetime from datetime import timedelta from typing import cast -from unittest.mock import MagicMock, create_autospec, patch +from unittest.mock import MagicMock, create_autospec import flask import pytest @@ -17,7 +17,6 @@ FulfillmentInfo, HoldInfo, LoanInfo, - PatronActivityThread, ) from palace.manager.api.circulation_exceptions import ( AlreadyCheckedOut, @@ -43,11 +42,14 @@ from palace.manager.sqlalchemy.model.resource import Hyperlink, Representation from palace.manager.util.datetime_helpers import utc_now from tests.fixtures.database import DatabaseTransactionFixture -from tests.fixtures.files import BibliothecaFilesFixture from tests.fixtures.library import LibraryFixture from tests.mocks.analytics_provider import MockAnalyticsProvider from tests.mocks.bibliotheca import MockBibliothecaAPI -from tests.mocks.circulation import MockCirculationAPI, MockPatronActivityCirculationAPI +from tests.mocks.circulation import ( + MockBaseCirculationAPI, + MockCirculationAPI, + MockPatronActivityCirculationAPI, +) class CirculationAPIFixture: @@ -96,12 +98,6 @@ def borrow(circulation_api: CirculationAPIFixture): circulation_api.delivery_mechanism, ) - @staticmethod - def sync_bookshelf(circulation_api: CirculationAPIFixture): - return circulation_api.circulation.sync_bookshelf( - circulation_api.patron, "1234" - ) - def test_circulationinfo_collection_id( self, circulation_api: CirculationAPIFixture ): @@ -399,43 +395,6 @@ def test_hold_sends_analytics_event(self, circulation_api: CirculationAPIFixture # sent. 
assert 1 == circulation_api.analytics.count - def test_loan_becomes_hold_if_no_available_copies_and_preexisting_loan( - self, circulation_api: CirculationAPIFixture - ): - # Once upon a time, we had a loan for this book. - loan, ignore = circulation_api.pool.loan_to(circulation_api.patron) - loan.start = self.YESTERDAY - - # But no longer! What's more, other patrons have taken all the - # copies! - circulation_api.remote.queue_checkout(NoAvailableCopies()) - holdinfo = HoldInfo( - circulation_api.pool.collection, - circulation_api.pool.data_source, - circulation_api.identifier.type, - circulation_api.identifier.identifier, - None, - None, - 10, - ) - circulation_api.remote.queue_hold(holdinfo) - - assert [] == circulation_api.remote.availability_updated_for - - # As such, an attempt to renew our loan results in us actually - # placing a hold on the book. - loan, hold, is_new = self.borrow(circulation_api) - assert None == loan - assert True == is_new - assert circulation_api.pool == hold.license_pool - assert circulation_api.patron == hold.patron - - # When NoAvailableCopies was raised, the circulation - # information for the book was immediately updated, to reduce - # the risk that other patrons would encounter the same - # problem. - assert [circulation_api.pool] == circulation_api.remote.availability_updated_for - def test_borrow_with_expired_card_fails( self, circulation_api: CirculationAPIFixture ): @@ -924,7 +883,6 @@ def yes_we_can(*args, **kwargs): def test_revoke_loan(self, circulation_api: CirculationAPIFixture, open_access): circulation_api.pool.open_access = open_access - circulation_api.patron.last_loan_activity_sync = utc_now() circulation_api.pool.loan_to(circulation_api.patron) circulation_api.remote.queue_checkin() @@ -933,9 +891,6 @@ def test_revoke_loan(self, circulation_api: CirculationAPIFixture, open_access): ) assert True == result - # The patron's loan activity is now out of sync. 
- assert None == circulation_api.patron.last_loan_activity_sync - # An analytics event was created. assert 1 == circulation_api.analytics.count assert CirculationEvent.CM_CHECKIN == circulation_api.analytics.event_type @@ -944,7 +899,6 @@ def test_revoke_loan(self, circulation_api: CirculationAPIFixture, open_access): def test_release_hold(self, circulation_api: CirculationAPIFixture, open_access): circulation_api.pool.open_access = open_access - circulation_api.patron.last_loan_activity_sync = utc_now() circulation_api.pool.on_hold_to(circulation_api.patron) circulation_api.remote.queue_release_hold() @@ -953,9 +907,6 @@ def test_release_hold(self, circulation_api: CirculationAPIFixture, open_access) ) assert True == result - # The patron's loan activity is now out of sync. - assert None == circulation_api.patron.last_loan_activity_sync - # An analytics event was created. assert 1 == circulation_api.analytics.count assert CirculationEvent.CM_HOLD_RELEASE == circulation_api.analytics.event_type @@ -1055,188 +1006,252 @@ def assert_event(inp, outp): api.analytics = None api._collect_event(p1, None, "event") - def test_sync_bookshelf_ignores_local_loan_with_no_identifier( - self, circulation_api: CirculationAPIFixture + def test_can_fulfill_without_loan(self, circulation_api: CirculationAPIFixture): + """Can a title can be fulfilled without an active loan? It depends on + the BaseCirculationAPI implementation for that title's collection. + """ + + pool = circulation_api.db.licensepool(None) + mock = create_autospec(BaseCirculationAPI) + mock.can_fulfill_without_loan = MagicMock(return_value="yep") + circulation = CirculationAPI( + circulation_api.db.session, + circulation_api.db.default_library(), + {pool.collection.id: mock}, + ) + assert "yep" == circulation.can_fulfill_without_loan(None, pool, MagicMock()) + + # If format data is missing or the BaseCirculationAPI cannot + # be found, we assume the title cannot be fulfilled. 
+ assert False == circulation.can_fulfill_without_loan(None, pool, None) + assert False == circulation.can_fulfill_without_loan(None, None, MagicMock()) + + circulation.api_for_collection = {} + assert False == circulation.can_fulfill_without_loan(None, pool, None) + + # An open access pool can be fulfilled even without the BaseCirculationAPI. + pool.open_access = True + assert True == circulation.can_fulfill_without_loan(None, pool, MagicMock()) + + +class TestBaseCirculationAPI: + def test_default_notification_email_address( + self, db: DatabaseTransactionFixture, library_fixture: LibraryFixture ): - loan, ignore = circulation_api.pool.loan_to(circulation_api.patron) - loan.start = self.YESTERDAY - circulation_api.pool.identifier = None + # Test the ability to get the default notification email address + # for a patron or a library. + settings = library_fixture.mock_settings() + settings.default_notification_email_address = "help@library" # type: ignore[assignment] + library = library_fixture.library(settings=settings) + patron = db.patron(library=library) + m = BaseCirculationAPI.default_notification_email_address + assert "help@library" == m(library, "") + assert "help@library" == m(patron, "") + other_library = library_fixture.library() + assert "noreply@thepalaceproject.org" == m(other_library, "") + + def test_can_fulfill_without_loan(self, db: DatabaseTransactionFixture): + """By default, there is a blanket prohibition on fulfilling a title + when there is no active loan. 
+ """ + api = MockBaseCirculationAPI(db.session, db.default_collection()) + assert False == api.can_fulfill_without_loan( + MagicMock(), MagicMock(), MagicMock() + ) + + +class PatronActivityCirculationAPIFixture: + def __init__(self, db: DatabaseTransactionFixture) -> None: + self.db = db + self.patron = db.patron() + self.collection = db.collection() + edition, self.pool = db.edition( + data_source_name=DataSource.BIBLIOTHECA, + identifier_type=Identifier.BIBLIOTHECA_ID, + with_license_pool=True, + collection=self.collection, + ) + self.identifier = self.pool.identifier + self.api = MockPatronActivityCirculationAPI(db.session, self.collection) + self.now = utc_now() + self.yesterday = self.now - timedelta(days=1) + self.tomorrow = self.now + timedelta(days=1) + self.in_two_weeks = self.now + timedelta(weeks=2) + + def sync_patron_activity(self) -> None: + self.api.sync_patron_activity(self.patron, "1234") + + +@pytest.fixture +def patron_activity_circulation_api( + db: DatabaseTransactionFixture, +) -> PatronActivityCirculationAPIFixture: + return PatronActivityCirculationAPIFixture(db) + + +class TestPatronActivityCirculationAPI: + def test_sync_patron_activity_ignores_local_loan_with_no_identifier( + self, + db: DatabaseTransactionFixture, + patron_activity_circulation_api: PatronActivityCirculationAPIFixture, + ): + loan, _ = patron_activity_circulation_api.pool.loan_to( + patron_activity_circulation_api.patron + ) + loan.start = patron_activity_circulation_api.yesterday + patron_activity_circulation_api.pool.identifier = None # Verify that we can sync without crashing. - self.sync_bookshelf(circulation_api) + patron_activity_circulation_api.sync_patron_activity() # The invalid loan was ignored and is still there. - loans = circulation_api.db.session.query(Loan).all() + loans = db.session.query(Loan).all() assert [loan] == loans # Even worse - the loan has no license pool! loan.license_pool = None # But we can still sync without crashing. 
- self.sync_bookshelf(circulation_api) + patron_activity_circulation_api.sync_patron_activity() - def test_sync_bookshelf_ignores_local_hold_with_no_identifier( - self, circulation_api: CirculationAPIFixture + def test_sync_patron_activity_ignores_local_hold_with_no_identifier( + self, + db: DatabaseTransactionFixture, + patron_activity_circulation_api: PatronActivityCirculationAPIFixture, ): - hold, ignore = circulation_api.pool.on_hold_to(circulation_api.patron) - circulation_api.pool.identifier = None + hold, _ = patron_activity_circulation_api.pool.on_hold_to( + patron_activity_circulation_api.patron + ) + patron_activity_circulation_api.pool.identifier = None # Verify that we can sync without crashing. - self.sync_bookshelf(circulation_api) + patron_activity_circulation_api.sync_patron_activity() # The invalid hold was ignored and is still there. - holds = circulation_api.db.session.query(Hold).all() + holds = db.session.query(Hold).all() assert [hold] == holds # Even worse - the hold has no license pool! hold.license_pool = None # But we can still sync without crashing. - self.sync_bookshelf(circulation_api) + patron_activity_circulation_api.sync_patron_activity() - def test_sync_bookshelf_with_old_local_loan_and_no_remote_loan_deletes_local_loan( - self, circulation_api: CirculationAPIFixture + def test_sync_patron_activity_with_old_local_loan_and_no_remote_loan_deletes_local_loan( + self, + db: DatabaseTransactionFixture, + patron_activity_circulation_api: PatronActivityCirculationAPIFixture, ): # Local loan that was created yesterday. - loan, ignore = circulation_api.pool.loan_to(circulation_api.patron) - loan.start = self.YESTERDAY + loan, _ = patron_activity_circulation_api.pool.loan_to( + patron_activity_circulation_api.patron + ) + loan.start = patron_activity_circulation_api.yesterday # The loan is in the db. 
- loans = circulation_api.db.session.query(Loan).all() + loans = db.session.query(Loan).all() assert [loan] == loans - self.sync_bookshelf(circulation_api) + patron_activity_circulation_api.sync_patron_activity() # Now the local loan is gone. - loans = circulation_api.db.session.query(Loan).all() + loans = db.session.query(Loan).all() assert [] == loans - def test_sync_bookshelf_with_new_local_loan_and_no_remote_loan_keeps_local_loan( - self, circulation_api: CirculationAPIFixture + def test_sync_patron_activity_with_new_local_loan_and_no_remote_loan_keeps_local_loan( + self, + db: DatabaseTransactionFixture, + patron_activity_circulation_api: PatronActivityCirculationAPIFixture, ): # Local loan that was just created. - loan, ignore = circulation_api.pool.loan_to(circulation_api.patron) + loan, _ = patron_activity_circulation_api.pool.loan_to( + patron_activity_circulation_api.patron + ) loan.start = utc_now() # The loan is in the db. - loans = circulation_api.db.session.query(Loan).all() + loans = db.session.query(Loan).all() assert [loan] == loans - self.sync_bookshelf(circulation_api) + patron_activity_circulation_api.sync_patron_activity() # The loan is still in the db, since it was just created. 
- loans = circulation_api.db.session.query(Loan).all() + loans = db.session.query(Loan).all() assert [loan] == loans - def test_sync_bookshelf_with_incomplete_remotes_keeps_local_loan( - self, circulation_api: CirculationAPIFixture - ): - circulation_api.patron.last_loan_activity_sync = utc_now() - loan, ignore = circulation_api.pool.loan_to(circulation_api.patron) - loan.start = self.YESTERDAY - - circulation = circulation_api.circulation - mock_patron_activity = MagicMock(return_value=([], [], False)) - circulation.patron_activity = mock_patron_activity - circulation.sync_bookshelf(circulation_api.patron, "1234") - - assert mock_patron_activity.call_count == 1 - - # The loan is still in the db, since there was an - # error from one of the remote APIs and we don't have - # complete loan data. - loans = circulation_api.db.session.query(Loan).all() - assert [loan] == loans - - # Since we don't have complete loan data, - # patron.last_loan_activity_sync has been cleared out -- we know - # the data we have is unreliable. - assert None == circulation_api.patron.last_loan_activity_sync - - mock_patron_activity = MagicMock(return_value=([], [], True)) - circulation.patron_activity = mock_patron_activity - circulation.sync_bookshelf(circulation_api.patron, "1234") - - assert mock_patron_activity.call_count == 1 - - # Now the loan is gone. - loans = circulation_api.db.session.query(Loan).all() - assert [] == loans - - # Since we know our picture of the patron's bookshelf is up-to-date, - # patron.last_loan_activity_sync has been set to the current time. 
- now = utc_now() - assert ( - now - circulation_api.patron.last_loan_activity_sync - ).total_seconds() < 2 - - def test_sync_bookshelf_updates_local_loan_and_hold_with_modified_timestamps( - self, circulation_api: CirculationAPIFixture + def test_sync_patron_activity_updates_local_loan_and_hold_with_modified_timestamps( + self, + db: DatabaseTransactionFixture, + patron_activity_circulation_api: PatronActivityCirculationAPIFixture, ): # We have a local loan that supposedly runs from yesterday # until tomorrow. - loan, ignore = circulation_api.pool.loan_to(circulation_api.patron) - loan.start = self.YESTERDAY - loan.end = self.TOMORROW + loan, _ = patron_activity_circulation_api.pool.loan_to( + patron_activity_circulation_api.patron + ) + loan.start = patron_activity_circulation_api.yesterday + loan.end = patron_activity_circulation_api.tomorrow # But the remote thinks the loan runs from today until two # weeks from today. - circulation_api.circulation.add_remote_loan( + patron_activity_circulation_api.api.add_remote_loan( LoanInfo( - circulation_api.pool.collection, - circulation_api.pool.data_source, - circulation_api.identifier.type, - circulation_api.identifier.identifier, - self.TODAY, - self.IN_TWO_WEEKS, + patron_activity_circulation_api.pool.collection, + patron_activity_circulation_api.pool.data_source, + patron_activity_circulation_api.identifier.type, + patron_activity_circulation_api.identifier.identifier, + patron_activity_circulation_api.now, + patron_activity_circulation_api.in_two_weeks, ) ) # Similar situation for this hold on a different LicensePool. 
- edition, pool2 = circulation_api.db.edition( + _, pool2 = db.edition( data_source_name=DataSource.BIBLIOTHECA, identifier_type=Identifier.BIBLIOTHECA_ID, with_license_pool=True, - collection=circulation_api.collection, + collection=patron_activity_circulation_api.collection, ) - hold, ignore = pool2.on_hold_to(circulation_api.patron) - hold.start = self.YESTERDAY - hold.end = self.TOMORROW + hold, ignore = pool2.on_hold_to(patron_activity_circulation_api.patron) + hold.start = patron_activity_circulation_api.yesterday + hold.end = patron_activity_circulation_api.tomorrow hold.position = 10 - circulation_api.circulation.add_remote_hold( + patron_activity_circulation_api.api.add_remote_hold( HoldInfo( pool2.collection, pool2.data_source, pool2.identifier.type, pool2.identifier.identifier, - self.TODAY, - self.IN_TWO_WEEKS, + patron_activity_circulation_api.now, + patron_activity_circulation_api.in_two_weeks, 0, ) ) - circulation_api.circulation.sync_bookshelf(circulation_api.patron, "1234") + patron_activity_circulation_api.sync_patron_activity() # Our local loans and holds have been updated to reflect the new # data from the source of truth. 
- assert self.TODAY == loan.start - assert self.IN_TWO_WEEKS == loan.end + assert loan.start == patron_activity_circulation_api.now + assert loan.end == patron_activity_circulation_api.in_two_weeks - assert self.TODAY == hold.start - assert self.IN_TWO_WEEKS == hold.end - assert 0 == hold.position + assert hold.start == patron_activity_circulation_api.now + assert hold.end == patron_activity_circulation_api.in_two_weeks + assert hold.position == 0 - def test_sync_bookshelf_applies_locked_delivery_mechanism_to_loan( - self, circulation_api: CirculationAPIFixture + def test_sync_patron_activity_applies_locked_delivery_mechanism_to_loan( + self, + db: DatabaseTransactionFixture, + patron_activity_circulation_api: PatronActivityCirculationAPIFixture, ): # By the time we hear about the patron's loan, they've already # locked in an oddball delivery mechanism. mechanism = DeliveryMechanismInfo( Representation.TEXT_HTML_MEDIA_TYPE, DeliveryMechanism.NO_DRM ) - pool = circulation_api.db.licensepool(None) - circulation_api.circulation.add_remote_loan( + pool = db.licensepool(None) + patron_activity_circulation_api.api.add_remote_loan( LoanInfo( pool.collection, pool.data_source.name, @@ -1247,164 +1262,19 @@ def test_sync_bookshelf_applies_locked_delivery_mechanism_to_loan( locked_to=mechanism, ) ) - circulation_api.circulation.sync_bookshelf(circulation_api.patron, "1234") + patron_activity_circulation_api.sync_patron_activity() # The oddball delivery mechanism is now associated with the loan... - [loan] = circulation_api.patron.loans + [loan] = patron_activity_circulation_api.patron.loans assert loan.fulfillment is not None delivery = loan.fulfillment.delivery_mechanism assert Representation.TEXT_HTML_MEDIA_TYPE == delivery.content_type assert DeliveryMechanism.NO_DRM == delivery.drm_scheme # ... and (once we commit) with the LicensePool. 
- circulation_api.db.session.commit() + db.session.commit() assert loan.fulfillment in pool.delivery_mechanisms - def test_sync_bookshelf_respects_last_loan_activity_sync( - self, circulation_api: CirculationAPIFixture - ): - # We believe we have up-to-date loan activity for this patron. - now = utc_now() - circulation_api.patron.last_loan_activity_sync = now - - # Little do we know that they just used a vendor website to - # create a loan. - circulation_api.circulation.add_remote_loan( - LoanInfo( - circulation_api.pool.collection, - circulation_api.pool.data_source, - circulation_api.identifier.type, - circulation_api.identifier.identifier, - self.YESTERDAY, - self.IN_TWO_WEEKS, - ) - ) - - # Syncing our loans with the remote won't actually do anything. - circulation_api.circulation.sync_bookshelf(circulation_api.patron, "1234") - assert [] == circulation_api.patron.loans - - # But eventually, our local knowledge will grow stale. - long_ago = now - timedelta( - seconds=circulation_api.patron.loan_activity_max_age * 2 - ) - circulation_api.patron.last_loan_activity_sync = long_ago - - # At that point, sync_bookshelf _will_ go out to the remote. - now = utc_now() - circulation_api.circulation.sync_bookshelf(circulation_api.patron, "1234") - assert 1 == len(circulation_api.patron.loans) - - # Once that happens, patron.last_loan_activity_sync is updated to - # the current time. - updated = circulation_api.patron.last_loan_activity_sync - assert (updated - now).total_seconds() < 2 - - # It's also possible to force a sync even when one wouldn't - # normally happen, by passing force=True into sync_bookshelf. - circulation_api.circulation.remote_loans = [] - - # A hack to work around the rule that loans not found on - # remote don't get deleted if they were created in the last 60 - # seconds. 
- circulation_api.patron.loans[0].start = long_ago - circulation_api.db.session.commit() - - circulation_api.circulation.sync_bookshelf( - circulation_api.patron, "1234", force=True - ) - assert [] == circulation_api.patron.loans - assert circulation_api.patron.last_loan_activity_sync > updated - - def test_patron_activity( - self, - circulation_api: CirculationAPIFixture, - bibliotheca_files_fixture: BibliothecaFilesFixture, - ): - # Get a CirculationAPI that doesn't mock out its API's patron activity. - mock_bibliotheca = MockBibliothecaAPI( - circulation_api.db.session, circulation_api.collection - ) - circulation = CirculationAPI( - circulation_api.db.session, - circulation_api.db.default_library(), - {circulation_api.collection.id: mock_bibliotheca}, - ) - - data = bibliotheca_files_fixture.sample_data("checkouts.xml") - mock_bibliotheca.queue_response(200, content=data) - - with patch.object(PatronActivityThread, "api", return_value=mock_bibliotheca): - loans, holds, complete = circulation.patron_activity( - circulation_api.patron, "1234" - ) - assert 2 == len(loans) - assert 2 == len(holds) - assert complete is True - - mock_bibliotheca.queue_response(500, content="Error") - - with patch.object(PatronActivityThread, "api", return_value=mock_bibliotheca): - loans, holds, complete = circulation.patron_activity( - circulation_api.patron, "1234" - ) - assert 0 == len(loans) - assert 0 == len(holds) - assert complete is False - - def test_can_fulfill_without_loan(self, circulation_api: CirculationAPIFixture): - """Can a title can be fulfilled without an active loan? It depends on - the BaseCirculationAPI implementation for that title's colelction. 
- """ - - pool = circulation_api.db.licensepool(None) - mock = MagicMock(spec=MockPatronActivityCirculationAPI) - mock.can_fulfill_without_loan = MagicMock(return_value="yep") - circulation = CirculationAPI( - circulation_api.db.session, - circulation_api.db.default_library(), - {pool.collection.id: mock}, - ) - assert "yep" == circulation.can_fulfill_without_loan(None, pool, MagicMock()) - - # If format data is missing or the BaseCirculationAPI cannot - # be found, we assume the title cannot be fulfilled. - assert False == circulation.can_fulfill_without_loan(None, pool, None) - assert False == circulation.can_fulfill_without_loan(None, None, MagicMock()) - - circulation.api_for_collection = {} - assert False == circulation.can_fulfill_without_loan(None, pool, None) - - # An open access pool can be fulfilled even without the BaseCirculationAPI. - pool.open_access = True - assert True == circulation.can_fulfill_without_loan(None, pool, MagicMock()) - - -class TestBaseCirculationAPI: - def test_default_notification_email_address( - self, db: DatabaseTransactionFixture, library_fixture: LibraryFixture - ): - # Test the ability to get the default notification email address - # for a patron or a library. - settings = library_fixture.mock_settings() - settings.default_notification_email_address = "help@library" # type: ignore[assignment] - library = library_fixture.library(settings=settings) - patron = db.patron(library=library) - m = BaseCirculationAPI.default_notification_email_address - assert "help@library" == m(library, "") - assert "help@library" == m(patron, "") - other_library = library_fixture.library() - assert "noreply@thepalaceproject.org" == m(other_library, "") - - def test_can_fulfill_without_loan(self, db: DatabaseTransactionFixture): - """By default, there is a blanket prohibition on fulfilling a title - when there is no active loan. 
- """ - api = MockPatronActivityCirculationAPI(db.session, db.default_collection()) - assert False == api.can_fulfill_without_loan( - MagicMock(), MagicMock(), MagicMock() - ) - class TestDeliveryMechanismInfo: def test_apply(self, db: DatabaseTransactionFixture): diff --git a/tests/manager/api/test_opds_for_distributors.py b/tests/manager/api/test_opds_for_distributors.py index e9cc883571..a913963769 100644 --- a/tests/manager/api/test_opds_for_distributors.py +++ b/tests/manager/api/test_opds_for_distributors.py @@ -507,53 +507,6 @@ def test_fulfill(self, opds_dist_api_fixture: OPDSForDistributorsAPIFixture): < 5 ) - def test_patron_activity( - self, opds_dist_api_fixture: OPDSForDistributorsAPIFixture - ): - # The patron has two loans from this API's collection and - # one from a different collection. - patron = opds_dist_api_fixture.db.patron() - - data_source = DataSource.lookup( - opds_dist_api_fixture.db.session, "Biblioboard", autocreate=True - ) - e1, p1 = opds_dist_api_fixture.db.edition( - identifier_type=Identifier.URI, - data_source_name=data_source.name, - with_license_pool=True, - collection=opds_dist_api_fixture.collection, - ) - p1.loan_to(patron) - - e2, p2 = opds_dist_api_fixture.db.edition( - identifier_type=Identifier.URI, - data_source_name=data_source.name, - with_license_pool=True, - collection=opds_dist_api_fixture.collection, - ) - p2.loan_to(patron) - - other_collection = opds_dist_api_fixture.db.collection( - protocol=OverdriveAPI.label() - ) - e3, p3 = opds_dist_api_fixture.db.edition( - identifier_type=Identifier.OVERDRIVE_ID, - data_source_name=DataSource.OVERDRIVE, - with_license_pool=True, - collection=other_collection, - ) - p3.loan_to(patron) - - activity = opds_dist_api_fixture.api.patron_activity(patron, "1234") - assert 2 == len(activity) - [l1, l2] = activity - assert l1.collection_id == opds_dist_api_fixture.collection.id - assert l2.collection_id == opds_dist_api_fixture.collection.id - assert {l1.identifier, l2.identifier} 
== { - p1.identifier.identifier, - p2.identifier.identifier, - } - class TestOPDSForDistributorsImporter: def test_import(self, opds_dist_api_fixture: OPDSForDistributorsAPIFixture): diff --git a/tests/manager/api/test_overdrive.py b/tests/manager/api/test_overdrive.py index 5c1b7d08a6..d80d9e2e3a 100644 --- a/tests/manager/api/test_overdrive.py +++ b/tests/manager/api/test_overdrive.py @@ -19,7 +19,6 @@ FulfillmentInfo, HoldInfo, LoanInfo, - PatronActivityThread, ) from palace.manager.api.circulation_exceptions import ( CannotFulfill, @@ -131,9 +130,9 @@ def sample_json(self, filename): data = self.data.sample_data(filename) return data, json.loads(data) - def sync_bookshelf(self, patron: Patron): - with patch.object(PatronActivityThread, "api", return_value=self.api): - return self.circulation.sync_bookshelf(patron, "dummy pin") + def sync_patron_activity(self, patron: Patron): + self.api.sync_patron_activity(patron, "dummy pin") + return self.api.local_loans_and_holds(patron) @pytest.fixture(scope="function") @@ -2610,7 +2609,7 @@ def test_process_checkout_data(self, overdrive_api_fixture: OverdriveAPIFixture) class TestSyncBookshelf: - def test_sync_bookshelf_creates_local_loans( + def test_sync_patron_activity_creates_local_loans( self, overdrive_api_fixture: OverdriveAPIFixture ): db = overdrive_api_fixture.db @@ -2624,16 +2623,17 @@ def test_sync_bookshelf_creates_local_loans( overdrive_api_fixture.api.queue_response(200, content=holds_data) patron = db.patron() - loans, holds = overdrive_api_fixture.sync_bookshelf(patron) + loans, holds = overdrive_api_fixture.sync_patron_activity(patron) # All four loans in the sample data were created. - assert isinstance(loans, list) assert 4 == len(loans) - assert loans.sort() == patron.loans.sort() + assert set(loans.values()) == set(patron.loans) # We have created previously unknown LicensePools and # Identifiers. 
- identifiers = [str(loan.license_pool.identifier.identifier) for loan in loans] + identifiers = [ + str(loan.license_pool.identifier.identifier) for loan in loans.values() + ] assert sorted( [ "a5a3d737-34d4-4d69-aad8-eba4e46019a3", @@ -2646,7 +2646,7 @@ def test_sync_bookshelf_creates_local_loans( # We have recorded a new DeliveryMechanism associated with # each loan. mechanisms = [] - for loan in loans: + for loan in loans.values(): if loan.fulfillment: mechanism = loan.fulfillment.delivery_mechanism mechanisms.append((mechanism.content_type, mechanism.drm_scheme)) @@ -2658,18 +2658,16 @@ def test_sync_bookshelf_creates_local_loans( ] == mechanisms # There are no holds. - assert [] == holds + assert {} == holds # Running the sync again leaves all four loans in place. - patron.last_loan_activity_sync = None overdrive_api_fixture.api.queue_response(200, content=loans_data) overdrive_api_fixture.api.queue_response(200, content=holds_data) - loans, holds = overdrive_api_fixture.sync_bookshelf(patron) - assert isinstance(loans, list) + loans, holds = overdrive_api_fixture.sync_patron_activity(patron) assert 4 == len(loans) - assert loans.sort() == patron.loans.sort() + assert set(loans.values()) == set(patron.loans) - def test_sync_bookshelf_removes_loans_not_present_on_remote( + def test_sync_patron_activity_removes_loans_not_present_on_remote( self, overdrive_api_fixture: OverdriveAPIFixture ): db = overdrive_api_fixture.db @@ -2695,14 +2693,13 @@ def test_sync_bookshelf_removes_loans_not_present_on_remote( # Sync with Overdrive, and the loan not present in the sample # data is removed. 
- loans, holds = overdrive_api_fixture.sync_bookshelf(patron) + loans, holds = overdrive_api_fixture.sync_patron_activity(patron) - assert isinstance(loans, list) assert 4 == len(loans) - assert set(loans) == set(patron.loans) + assert set(loans.values()) == set(patron.loans) assert overdrive_loan not in patron.loans - def test_sync_bookshelf_ignores_loans_from_other_sources( + def test_sync_patron_activity_ignores_loans_from_other_sources( self, overdrive_api_fixture: OverdriveAPIFixture ): db = overdrive_api_fixture.db @@ -2723,11 +2720,11 @@ def test_sync_bookshelf_ignores_loans_from_other_sources( overdrive_api_fixture.api.queue_response(200, content=loans_data) overdrive_api_fixture.api.queue_response(200, content=holds_data) - overdrive_api_fixture.sync_bookshelf(patron) + overdrive_api_fixture.sync_patron_activity(patron) assert 5 == len(patron.loans) assert gutenberg_loan in patron.loans - def test_sync_bookshelf_creates_local_holds( + def test_sync_patron_activity_creates_local_holds( self, overdrive_api_fixture: OverdriveAPIFixture ): db = overdrive_api_fixture.db @@ -2739,22 +2736,19 @@ def test_sync_bookshelf_creates_local_holds( overdrive_api_fixture.api.queue_response(200, content=holds_data) patron = db.patron() - loans, holds = overdrive_api_fixture.sync_bookshelf(patron) + loans, holds = overdrive_api_fixture.sync_patron_activity(patron) # All four loans in the sample data were created. - assert isinstance(holds, list) assert 4 == len(holds) - assert sorted(holds) == sorted(patron.holds) + assert sorted(holds.values()) == sorted(patron.holds) # Running the sync again leaves all four holds in place. 
- patron.last_loan_activity_sync = None overdrive_api_fixture.api.queue_response(200, content=loans_data) overdrive_api_fixture.api.queue_response(200, content=holds_data) - loans, holds = overdrive_api_fixture.sync_bookshelf(patron) - assert isinstance(holds, list) + loans, holds = overdrive_api_fixture.sync_patron_activity(patron) assert 4 == len(holds) - assert sorted(holds) == sorted(patron.holds) + assert sorted(holds.values()) == sorted(patron.holds) - def test_sync_bookshelf_removes_holds_not_present_on_remote( + def test_sync_patron_activity_removes_holds_not_present_on_remote( self, overdrive_api_fixture: OverdriveAPIFixture ): db = overdrive_api_fixture.db @@ -2770,18 +2764,19 @@ def test_sync_bookshelf_removes_holds_not_present_on_remote( ) [pool] = overdrive_edition.license_pools overdrive_hold, new = pool.on_hold_to(patron) + yesterday = utc_now() - timedelta(days=1) + overdrive_hold.start = yesterday overdrive_api_fixture.api.queue_response(200, content=loans_data) overdrive_api_fixture.api.queue_response(200, content=holds_data) # The hold not present in the sample data has been removed - loans, holds = overdrive_api_fixture.sync_bookshelf(patron) - assert isinstance(holds, list) + loans, holds = overdrive_api_fixture.sync_patron_activity(patron) assert 4 == len(holds) - assert holds == patron.holds - assert overdrive_hold not in patron.loans + assert set(holds.values()) == set(patron.holds) + assert overdrive_hold not in patron.holds - def test_sync_bookshelf_ignores_holds_from_other_collections( + def test_sync_patron_activity_ignores_holds_from_other_collections( self, overdrive_api_fixture: OverdriveAPIFixture ): db = overdrive_api_fixture.db @@ -2806,7 +2801,7 @@ def test_sync_bookshelf_ignores_holds_from_other_collections( # overdrive_api_fixture.api doesn't know about the hold, but it was not # destroyed, because it came from a different collection. 
- overdrive_api_fixture.sync_bookshelf(patron) + overdrive_api_fixture.sync_patron_activity(patron) assert 5 == len(patron.holds) assert overdrive_hold in patron.holds diff --git a/tests/manager/celery/tasks/test_patron_activity.py b/tests/manager/celery/tasks/test_patron_activity.py new file mode 100644 index 0000000000..00fbb60e38 --- /dev/null +++ b/tests/manager/celery/tasks/test_patron_activity.py @@ -0,0 +1,216 @@ +from unittest.mock import MagicMock, create_autospec, patch + +import pytest + +from palace.manager.celery.task import Task +from palace.manager.celery.tasks.patron_activity import sync_patron_activity +from palace.manager.service.integration_registry.license_providers import ( + LicenseProvidersRegistry, +) +from palace.manager.service.logging.configuration import LogLevel +from palace.manager.service.redis.models.patron_activity import ( + PatronActivity, + PatronActivityStatus, +) +from tests.fixtures.celery import CeleryFixture +from tests.fixtures.database import DatabaseTransactionFixture +from tests.fixtures.redis import RedisFixture +from tests.fixtures.services import ServicesFixture +from tests.mocks.circulation import MockPatronActivityCirculationAPI + + +class SyncTaskFixture: + def __init__( + self, + db: DatabaseTransactionFixture, + redis_fixture: RedisFixture, + celery_fixture: CeleryFixture, + services_fixture: ServicesFixture, + ): + self.db = db + self.redis_fixture = redis_fixture + self.celery_fixture = celery_fixture + self.services_fixture = services_fixture + + self.library = db.library() + self.patron = db.patron(library=self.library) + self.collection = db.collection(library=self.library) + + self.redis_record = PatronActivity( + self.redis_fixture.client, + self.collection.id, + self.patron.id, + "test-fixture-task-id", + ) + self.mock_registry = create_autospec(LicenseProvidersRegistry) + self.services_fixture.services.integration_registry.license_providers.override( + self.mock_registry + ) + self.mock_collection_api = 
MockPatronActivityCirculationAPI( + self.db.session, self.collection + ) + self.mock_registry.from_collection.return_value = self.mock_collection_api + + +@pytest.fixture +def sync_task_fixture( + db: DatabaseTransactionFixture, + redis_fixture: RedisFixture, + celery_fixture: CeleryFixture, + services_fixture: ServicesFixture, +): + return SyncTaskFixture(db, redis_fixture, celery_fixture, services_fixture) + + +class TestSyncPatronActivity: + def test_unable_to_lock( + self, sync_task_fixture: SyncTaskFixture, caplog: pytest.LogCaptureFixture + ): + caplog.set_level(LogLevel.info) + + # We lock the patron activity record in redis, so the task cannot acquire it. + sync_task_fixture.redis_record.lock() + + # We patch the task to raise an exception if the db is accessed. If we don't acquire the lock + # we should never go out to the database. + with patch.object( + Task, "_session_maker", side_effect=Exception() + ) as mock_session: + sync_patron_activity.apply_async( + (sync_task_fixture.collection.id, sync_task_fixture.patron.id, "pin") + ).wait() + + assert ( + "Patron activity sync task could not acquire lock. " + "Task will not perform sync. Lock state (LOCKED)" + ) in caplog.text + assert mock_session.call_count == 0 + + def test_patron_not_found( + self, + sync_task_fixture: SyncTaskFixture, + db: DatabaseTransactionFixture, + caplog: pytest.LogCaptureFixture, + ): + patron_id = sync_task_fixture.patron.id + db.session.delete(sync_task_fixture.patron) + task = sync_patron_activity.apply_async( + (sync_task_fixture.collection.id, patron_id, "pin") + ) + task.wait() + + assert f"Patron (id: {patron_id}) not found." 
in caplog.text + + task_status = sync_task_fixture.redis_record.status() + assert task_status is not None + assert task_status.state == PatronActivityStatus.State.FAILED + assert task_status.task_id == task.id + + def test_collection_not_found( + self, + sync_task_fixture: SyncTaskFixture, + db: DatabaseTransactionFixture, + caplog: pytest.LogCaptureFixture, + ): + collection_id = sync_task_fixture.collection.id + db.session.delete(sync_task_fixture.collection) + task = sync_patron_activity.apply_async( + (collection_id, sync_task_fixture.patron.id, "pin") + ) + task.wait() + + assert f"Collection (id: {collection_id}) not found." in caplog.text + + task_status = sync_task_fixture.redis_record.status() + assert task_status is not None + assert task_status.state == PatronActivityStatus.State.FAILED + assert task_status.task_id == task.id + + def test_exception( + self, sync_task_fixture: SyncTaskFixture, caplog: pytest.LogCaptureFixture + ): + sync_task_fixture.mock_registry.from_collection.side_effect = Exception("Boom!") + + with pytest.raises(Exception, match="Boom!"): + sync_patron_activity.apply_async( + (sync_task_fixture.collection.id, sync_task_fixture.patron.id, "pin") + ).wait() + + task_status = sync_task_fixture.redis_record.status() + assert task_status is not None + assert task_status.state == PatronActivityStatus.State.FAILED + + assert "An exception occurred during the patron activity sync" in caplog.text + assert "Boom!" 
in caplog.text + + def test_not_supported( + self, sync_task_fixture: SyncTaskFixture, caplog: pytest.LogCaptureFixture + ): + caplog.set_level(LogLevel.info) + sync_task_fixture.mock_registry.from_collection.return_value = MagicMock() + + sync_patron_activity.apply_async( + (sync_task_fixture.collection.id, sync_task_fixture.patron.id, "pin") + ).wait() + + task_status = sync_task_fixture.redis_record.status() + assert task_status is not None + assert task_status.state == PatronActivityStatus.State.NOT_SUPPORTED + + assert "does not support patron activity sync" in caplog.text + sync_task_fixture.mock_registry.from_collection.assert_called_once_with( + sync_task_fixture.db.session, sync_task_fixture.collection + ) + + def test_success(self, sync_task_fixture: SyncTaskFixture): + sync_patron_activity.apply_async( + (sync_task_fixture.collection.id, sync_task_fixture.patron.id, "pin") + ).wait() + + task_status = sync_task_fixture.redis_record.status() + assert task_status is not None + assert task_status.state == PatronActivityStatus.State.SUCCESS + + sync_task_fixture.mock_registry.from_collection.assert_called_once_with( + sync_task_fixture.db.session, sync_task_fixture.collection + ) + assert len(sync_task_fixture.mock_collection_api.patron_activity_calls) == 1 + assert sync_task_fixture.mock_collection_api.patron_activity_calls[0] == ( + sync_task_fixture.patron, + "pin", + ) + + def test_force( + self, sync_task_fixture: SyncTaskFixture, caplog: pytest.LogCaptureFixture + ): + # The task has been marked as failed. Normally, this means we don't need to run it again + # until the status expires. + caplog.set_level(LogLevel.info) + sync_task_fixture.redis_record.lock() + sync_task_fixture.redis_record.fail() + + sync_patron_activity.apply_async( + (sync_task_fixture.collection.id, sync_task_fixture.patron.id, "pin") + ).wait() + + assert ( + "Patron activity sync task could not acquire lock. " + "Task will not perform sync. 
Lock state (FAILED)" + ) in caplog.text + sync_task_fixture.mock_registry.from_collection.assert_not_called() + + # But if we force it, we should run it again. + caplog.clear() + sync_patron_activity.apply_async( + (sync_task_fixture.collection.id, sync_task_fixture.patron.id, "pin"), + {"force": True}, + ).wait() + assert "Patron activity sync task could not acquire lock" not in caplog.text + sync_task_fixture.mock_registry.from_collection.assert_called_once_with( + sync_task_fixture.db.session, sync_task_fixture.collection + ) + + # And it will update the state when it's done. + task_status = sync_task_fixture.redis_record.status() + assert task_status is not None + assert task_status.state == PatronActivityStatus.State.SUCCESS diff --git a/tests/manager/feed/test_library_annotator.py b/tests/manager/feed/test_library_annotator.py index 9080291781..13dab31318 100644 --- a/tests/manager/feed/test_library_annotator.py +++ b/tests/manager/feed/test_library_annotator.py @@ -924,7 +924,6 @@ def test_active_loan_feed( ): vendor_id_fixture.initialize_adobe(annotator_fixture.db.default_library()) patron = annotator_fixture.db.patron() - patron.last_loan_activity_sync = utc_now() annotator = LibraryLoanAndHoldAnnotator( None, annotator_fixture.lane, diff --git a/tests/manager/service/redis/__init__.py b/tests/manager/service/redis/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/manager/service/redis/models/__init__.py b/tests/manager/service/redis/models/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/manager/service/redis/models/test_patron_activity.py b/tests/manager/service/redis/models/test_patron_activity.py new file mode 100644 index 0000000000..eb8d04db87 --- /dev/null +++ b/tests/manager/service/redis/models/test_patron_activity.py @@ -0,0 +1,452 @@ +import datetime +from unittest.mock import patch + +import pytest +from freezegun import freeze_time + +from 
palace.manager.service.redis.models.patron_activity import ( + PatronActivity, + PatronActivityError, + PatronActivityStatus, +) +from palace.manager.util.datetime_helpers import datetime_utc, utc_now +from tests.fixtures.database import DatabaseTransactionFixture +from tests.fixtures.redis import RedisFixture + + +class TestPatronActivityStatus: + def test_init(self): + state = PatronActivityStatus.State.FAILED + task_id = "abc" + + # If timestamp is provided, it must be timezone aware + with pytest.raises(ValueError): + PatronActivityStatus( + state=state, task_id=task_id, timestamp=datetime.datetime.now() + ) + + timestamp = datetime_utc(year=1995, month=1, day=1) + status = PatronActivityStatus(state=state, task_id=task_id, timestamp=timestamp) + assert status.state == state + assert status.task_id == task_id + assert status.timestamp == timestamp + + with freeze_time(): + status = PatronActivityStatus(state=state, task_id=task_id) + assert status.state == state + assert status.task_id == task_id + assert status.timestamp == utc_now() + + def test_offsets(self): + time = datetime_utc(2020, 1, 1, 1, 1) + status = PatronActivityStatus( + state=PatronActivityStatus.State.LOCKED, task_id="abc", timestamp=time + ) + + data_str = status.to_redis() + + state = data_str[PatronActivityStatus.STATE_OFFSET.slice] + timestamp = data_str[PatronActivityStatus.TIMESTAMP_OFFSET.slice] + task_id = data_str[PatronActivityStatus.TASK_ID_OFFSET.slice] + + assert state == str(PatronActivityStatus.State.LOCKED) + assert timestamp == "2020-01-01T01:01:00" + assert task_id == "abc" + + def test_offsets_redis(self, redis_fixture: RedisFixture): + time = datetime_utc(1919, 1, 2, 3, 4) + status = PatronActivityStatus( + state=PatronActivityStatus.State.NOT_SUPPORTED, + task_id="abc", + timestamp=time, + ) + client = redis_fixture.client + + key = client.get_key("test") + redis_fixture.client.set(key, status.to_redis()) + + assert redis_fixture.client.getrange( + key, + 
PatronActivityStatus.STATE_OFFSET.start, + PatronActivityStatus.STATE_OFFSET.redis_end, + ) == str(PatronActivityStatus.State.NOT_SUPPORTED) + assert ( + redis_fixture.client.getrange( + key, + PatronActivityStatus.TIMESTAMP_OFFSET.start, + PatronActivityStatus.TIMESTAMP_OFFSET.redis_end, + ) + == "1919-01-02T03:04:00" + ) + assert ( + redis_fixture.client.getrange( + key, + PatronActivityStatus.TASK_ID_OFFSET.start, + PatronActivityStatus.TASK_ID_OFFSET.redis_end, + ) + == "abc" + ) + + def test_round_trip(self): + status = PatronActivityStatus( + state=PatronActivityStatus.State.FAILED, task_id="test::123456" + ) + status_from_redis = PatronActivityStatus.from_redis(status.to_redis()) + assert status_from_redis == status + + def test_to_redis(self): + # If the state cannot be converted into a 2 character string, we should raise an error + time = datetime_utc(2024, 6, 11, 1, 24) + with freeze_time(time): + status = PatronActivityStatus( + state=PatronActivityStatus.State.FAILED, task_id="test-123" + ) + with patch.object(status, "state", "foo bar baz"): + with pytest.raises(ValueError): + status.to_redis() + + # If the timestamp cannot be converted into a 19 character string, we should raise an error + with patch.object(status, "timestamp") as mock_timestamp: + mock_timestamp.isoformat.return_value = "foo bar baz" + with pytest.raises(ValueError): + status.to_redis() + + assert status.to_redis() == "F::2024-06-11T01:24:00::test-123" + + def test___eq__(self): + timestamp = utc_now() + task_id = "test::123456" + + status = PatronActivityStatus( + state=PatronActivityStatus.State.FAILED, + task_id=task_id, + timestamp=timestamp, + ) + + # We cannot compare with a different type + assert not (status == "foo") + + # We can compare with the same instance + assert status == status + + # Or a different instance, with same data + assert status == PatronActivityStatus( + state=PatronActivityStatus.State.FAILED, + task_id=task_id, + timestamp=timestamp, + ) + + # But a 
different instance with different data will not be equal + assert status != PatronActivityStatus( + state=PatronActivityStatus.State.FAILED, + task_id="different", + timestamp=timestamp, + ) + assert status != PatronActivityStatus( + state=PatronActivityStatus.State.SUCCESS, + task_id=task_id, + timestamp=timestamp, + ) + assert status != PatronActivityStatus( + state=PatronActivityStatus.State.SUCCESS, + task_id=task_id, + timestamp=datetime_utc(2010, 1, 1), + ) + + +class PatronActivityFixture: + def __init__(self, db: DatabaseTransactionFixture, redis_fixture: RedisFixture): + self._db = db + self._redis = redis_fixture + + self.patron = db.patron() + self.collection = db.collection() + self.task_id = "abc" + self.patron_activity = PatronActivity( + redis_fixture.client, self.collection.id, self.patron.id, self.task_id + ) + self.other_patron_activity = PatronActivity( + redis_fixture.client, self.collection.id, self.patron.id, "123" + ) + self.timeout_slop = 5 + + def assert_ttl(self, expected_ttl: int | None) -> None: + if expected_ttl is None: + # No ttl set + assert self._redis.client.ttl(self.patron_activity.key) == -1 + else: + min_timeout = expected_ttl - self.timeout_slop + max_timeout = expected_ttl + assert ( + min_timeout + < self._redis.client.ttl(self.patron_activity.key) + <= max_timeout + ) + + def assert_state(self, state: PatronActivityStatus.State): + status = self.patron_activity.status() + assert status is not None + assert status.state == state + + +@pytest.fixture +def patron_activity_fixture( + db: DatabaseTransactionFixture, redis_fixture: RedisFixture +): + return PatronActivityFixture(db, redis_fixture) + + +class TestPatronActivity: + def test_key( + self, + patron_activity_fixture: PatronActivityFixture, + redis_fixture: RedisFixture, + ): + key = patron_activity_fixture.patron_activity.key + assert key.startswith(redis_fixture.key_prefix) + patron_key = patron_activity_fixture.patron.redis_key() + collection_key = 
patron_activity_fixture.collection.redis_key() + assert key.endswith(f"::PatronActivity::{patron_key}::{collection_key}") + + def test_status(self, patron_activity_fixture: PatronActivityFixture): + patron_activity = patron_activity_fixture.patron_activity + + # If there is no record, we should return None + assert patron_activity.status() is None + + # If acquire the lock, we should be able to retrieve the status + assert patron_activity.lock() is True + status = patron_activity.status() + assert status is not None + assert status.state == PatronActivityStatus.State.LOCKED + assert status.task_id == patron_activity_fixture.task_id + + # If we complete the sync, we should be able to retrieve the timestamp + timestamp = datetime_utc(1995, 1, 1) + with freeze_time(timestamp): + assert patron_activity.success() is True + status = patron_activity.status() + assert status is not None + assert status.state == PatronActivityStatus.State.SUCCESS + assert status.task_id == patron_activity_fixture.task_id + assert status.timestamp == timestamp + + # If the status gets cleared we will return None + assert patron_activity.clear() is True + assert patron_activity.status() is None + + def test_lock( + self, + patron_activity_fixture: PatronActivityFixture, + ): + patron_activity = patron_activity_fixture.patron_activity + + # If we acquire the lock, we should return True + assert patron_activity.lock() is True + + # We cannot acquire the lock again, so we should return False + assert patron_activity.lock() is False + + # We set an expiry time for the LOCKED_TIMEOUT status, so it will automatically clear + # if anything goes wrong and the lock is never released. 
+ patron_activity_fixture.assert_ttl(patron_activity.LOCKED_TIMEOUT) + + # A different patron activity object cannot acquire the lock because we already have it + assert patron_activity_fixture.other_patron_activity.lock() is False + + def test_not_supported( + self, + patron_activity_fixture: PatronActivityFixture, + ): + patron_activity = patron_activity_fixture.patron_activity + + # If we don't have the lock acquired, we are unable to set the state to NOT_SUPPORTED + assert patron_activity.not_supported() is False + + # Once we acquire the lock, no other task can set the state to NOT_SUPPORTED, but we can + assert patron_activity.lock() is True + assert patron_activity_fixture.other_patron_activity.not_supported() is False + assert patron_activity.not_supported() is True + + patron_activity_fixture.assert_state(PatronActivityStatus.State.NOT_SUPPORTED) + + # And the key has the expected TTL set + patron_activity_fixture.assert_ttl(patron_activity.NOT_SUPPORTED_TIMEOUT) + + # We are able to clear the NOT_SUPPORTED status + assert patron_activity.clear() is True + assert patron_activity.status() is None + + def test_clear(self, patron_activity_fixture: PatronActivityFixture): + patron_activity = patron_activity_fixture.patron_activity + other_patron_activity = patron_activity_fixture.other_patron_activity + + # Clearing a status that doesn't exist will return True since there + # is nothing to clear. 
+ assert patron_activity.status() is None + assert patron_activity.clear() is True + + # Once we acquire the lock, no one else can clear it except for us + assert patron_activity.lock() is True + assert other_patron_activity.clear() is False + assert patron_activity.clear() is True + + # If we clear the status, we cannot complete or fail it + assert patron_activity.fail() is False + assert patron_activity.success() is False + + # If we complete or fail the status, any one can clear it + for clear_func in [patron_activity.clear, other_patron_activity.clear]: + for complete_func in [patron_activity.fail, patron_activity.success]: + assert patron_activity.lock() is True + assert complete_func() is True + assert patron_activity.status() is not None + assert clear_func() is True + assert patron_activity.status() is None + + def test_success( + self, + patron_activity_fixture: PatronActivityFixture, + ): + patron_activity = patron_activity_fixture.patron_activity + + # If we to update the status to success for a status that doesn't exist, we should return False + assert patron_activity.success() is False + + # Once we acquire the lock, a PatronActivity with a different task_id + # cannot update the status to success. 
+ assert patron_activity.lock() is True + assert patron_activity_fixture.other_patron_activity.success() is False + + # But we can, and the status should be updated + test_time = datetime_utc(1999, 9, 9, 9, 9, 9) + with freeze_time(test_time): + assert patron_activity.success() is True + assert patron_activity.status() == PatronActivityStatus( + state=PatronActivityStatus.State.SUCCESS, + task_id=patron_activity_fixture.task_id, + timestamp=test_time, + ) + + # Calling success again should return False + assert patron_activity.success() is False + + # Trying to fail a successful status should return False + assert patron_activity.fail() is False + + # We set a timeout for the status, so it will automatically clear, indicating that + # we want to sync the patron activity again. + patron_activity_fixture.assert_ttl(patron_activity.SUCCESS_TIMEOUT) + + def test_fail( + self, + patron_activity_fixture: PatronActivityFixture, + ): + patron_activity = patron_activity_fixture.patron_activity + + # If we try to fail a status that doesn't exist, we should return False + assert patron_activity.fail() is False + + # If we acquire the lock, a different PatronActivity object cannot update the status to failed + assert patron_activity.lock() is True + assert patron_activity_fixture.other_patron_activity.fail() is False + + # But we can, and the status should be updated + with freeze_time(): + assert patron_activity.fail() is True + assert patron_activity.status() == PatronActivityStatus( + state=PatronActivityStatus.State.FAILED, + task_id=patron_activity_fixture.task_id, + ) + + # Trying to fail the status again should return False + assert patron_activity.fail() is False + + # Trying to complete a failed status should return False + assert patron_activity.success() is False + + # We set a timeout for the status, so it will automatically clear after some time + # indicating that we want to try to sync the patron activity again. 
+ patron_activity_fixture.assert_ttl(patron_activity.FAILED_TIMEOUT) + + def test_context_manager( + self, + patron_activity_fixture: PatronActivityFixture, + caplog: pytest.LogCaptureFixture, + ): + patron_activity = patron_activity_fixture.patron_activity + other_patron_activity = patron_activity_fixture.other_patron_activity + + # The context manager should set the status to LOCKED when entered and complete it when exited + with freeze_time(): + with patron_activity as acquired: + assert acquired is True + patron_activity_fixture.assert_state(PatronActivityStatus.State.LOCKED) + patron_activity_fixture.assert_state(PatronActivityStatus.State.SUCCESS) + + # If there is an exception, the status should be failed instead of completed, and we should + # log the exception. + patron_activity.clear() + with pytest.raises(Exception): + with patron_activity as acquired: + assert acquired is True + raise Exception() + patron_activity_fixture.assert_state(PatronActivityStatus.State.FAILED) + assert "An exception occurred during the patron activity sync" in caplog.text + + # If the status is already LOCKED, the context manager returns False to indicate that + # it could not acquire the lock + patron_activity.clear() + other_patron_activity.lock() + with patron_activity as acquired: + assert acquired is False + # In this case the context manager does not update the status when it exits + patron_activity_fixture.assert_state(PatronActivityStatus.State.LOCKED) + assert other_patron_activity.clear() is True + + # Nesting the context manager will raise an error + with pytest.raises(PatronActivityError): + with patron_activity as acquired: + assert acquired is True + with patron_activity: + ... 
+ + def test_collections_ready_for_sync( + self, redis_fixture: RedisFixture, db: DatabaseTransactionFixture + ): + library = db.library() + patron = db.patron(library=library) + + collection_success = db.collection(library=library) + activity_success = PatronActivity( + redis_fixture.client, collection_success, patron, "abc" + ) + with activity_success: + activity_success.success() + + collection_fail = db.collection(library=library) + activity_fail = PatronActivity( + redis_fixture.client, collection_fail, patron, "def" + ) + with activity_fail: + activity_fail.fail() + + collection_not_supported = db.collection(library=library) + activity_not_supported = PatronActivity( + redis_fixture.client, collection_not_supported, patron, "ghi" + ) + with activity_not_supported: + activity_not_supported.not_supported() + + collection_locked = db.collection(library=library) + activity_locked = PatronActivity( + redis_fixture.client, collection_locked, patron, "jkl" + ) + activity_locked.lock() + + collection_ready_1 = db.collection(library=library) + collection_ready_2 = db.collection(library=library) + + assert PatronActivity.collections_ready_for_sync( + redis_fixture.client, patron + ) == {collection_ready_1, collection_ready_2} diff --git a/tests/manager/service/redis/test_key.py b/tests/manager/service/redis/test_key.py new file mode 100644 index 0000000000..1b86baa6b7 --- /dev/null +++ b/tests/manager/service/redis/test_key.py @@ -0,0 +1,47 @@ +import pytest + +from palace.manager.service.redis.exception import RedisKeyError +from tests.fixtures.redis import RedisFixture + + +class MockSupportsRedisKey: + def __init__(self, key: str = "test"): + self.key = key + + def redis_key(self) -> str: + return self.key + + +class TestRedisKeyGenerator: + def test___call__(self, redis_fixture: RedisFixture): + key_prefix = redis_fixture.key_prefix + key_generator = redis_fixture.client.get_key + sep = key_generator.SEPERATOR + + # No args returns just the key prefix + key = 
key_generator() + assert key == key_prefix + + # Simple string key + test_key = "test" + key = key_generator(test_key) + + # Key always includes the key prefix and is separated by the RedisKeyGenerator.SEPERATOR + assert key == f"{key_prefix}{sep}{test_key}" + + # Multiple args are all included and separated by the RedisKeyGenerator.SEPERATOR + key = key_generator("test", "key", "generator") + assert key == f"{key_prefix}{sep}test{sep}key{sep}generator" + + # ints are also supported and are converted to strings + key = key_generator(1, 2, 3) + assert key == f"{key_prefix}{sep}1{sep}2{sep}3" + + # SupportsRedisKey objects are supported, and their __redis_key__ method is called to get the key + key = key_generator(MockSupportsRedisKey("test"), MockSupportsRedisKey("key")) + assert key == f"{key_prefix}{sep}test{sep}key" + + # Unsupported types raise a RedisKeyError + with pytest.raises(RedisKeyError) as exc_info: + key_generator([1, 2, 3]) # type: ignore[arg-type] + assert "Unsupported key type: [1, 2, 3] (list)" in str(exc_info.value) diff --git a/tests/manager/service/redis/test_redis.py b/tests/manager/service/redis/test_redis.py new file mode 100644 index 0000000000..381cbdcbec --- /dev/null +++ b/tests/manager/service/redis/test_redis.py @@ -0,0 +1,65 @@ +import pytest + +from palace.manager.service.redis.exception import RedisValueError +from tests.fixtures.redis import RedisFixture + + +class TestRedis: + def test_client(self, redis_fixture: RedisFixture): + # Do a bit of basic testing to make sure the client is working + client = redis_fixture.client + key = client.get_key("test") + redis_fixture.client.set(key, "value") + assert redis_fixture.client.get(key) == "value" + + def test_prefix_check(self, redis_fixture: RedisFixture): + # Our version of the redis client checks that keys are prefixed correctly + client = redis_fixture.client + + key = "test" + with pytest.raises(RedisValueError) as exc_info: + client.set(key, "value") + assert ( + f"Key {key} does 
not start with prefix {redis_fixture.key_prefix}" + in str(exc_info.value) + ) + + # We also handle commands with multiple keys + key1 = client.get_key("test1") + key2 = client.get_key("test2") + key3 = "unprefixed" + + with pytest.raises(RedisValueError) as exc_info: + client.delete(key1, key2, key3) + assert ( + f"Key {key3} does not start with prefix {redis_fixture.key_prefix}" + in str(exc_info.value) + ) + + # If we pass a command that isn't checked for a prefix, we raise an error + with pytest.raises(RedisValueError) as exc_info: + client.execute_command("UNKNOWN", key1) + assert "Command UNKNOWN is not checked for prefix" in str(exc_info.value) + + def test_pipeline(self, redis_fixture: RedisFixture): + # We can also use pipelines + client = redis_fixture.client + key = client.get_key("test") + with client.pipeline() as pipe: + pipe.set(key, "value") + pipe.get(key) + result = pipe.execute() + assert result == [True, "value"] + + # Any pipeline commands are also checked for prefixes + with pytest.raises(RedisValueError) as exc_info: + with client.pipeline() as pipe: + pipe.set(key, "value") + pipe.get(key) + pipe.set("unprefixed", "value") + pipe.get("unprefixed") + pipe.execute() + assert ( + f"Key unprefixed does not start with prefix {redis_fixture.key_prefix}" + in str(exc_info.value) + ) diff --git a/tests/manager/sqlalchemy/model/test_collection.py b/tests/manager/sqlalchemy/model/test_collection.py index 39c7b37077..77470d32c4 100644 --- a/tests/manager/sqlalchemy/model/test_collection.py +++ b/tests/manager/sqlalchemy/model/test_collection.py @@ -597,3 +597,20 @@ def test_delete( # We've now deleted every LicensePool created for this test. assert 0 == db.session.query(LicensePool).count() assert [] == work2.license_pools + + def test_redis_key(self, example_collection_fixture: ExampleCollectionFixture): + collection = example_collection_fixture.collection + + # The key is based on the collection's ID. 
+ assert collection.redis_key() == f"Collection::{collection.id}" + + # If we know the collection's ID, we can get the key without a database query. + assert Collection.redis_key_from_id(collection.id) == collection.redis_key() + + # A collection with no id raises an exception. + collection_no_id = Collection() + with pytest.raises(TypeError) as excinfo: + collection_no_id.redis_key() + assert "Collection must have an id to generate a redis key." in str( + excinfo.value + ) diff --git a/tests/manager/sqlalchemy/model/test_licensing.py b/tests/manager/sqlalchemy/model/test_licensing.py index edbd5ab8bd..b97e40eb6a 100644 --- a/tests/manager/sqlalchemy/model/test_licensing.py +++ b/tests/manager/sqlalchemy/model/test_licensing.py @@ -337,12 +337,10 @@ def test_loan_to(self, licenses: LicenseTestFixture): pool = licenses.pool license = licenses.perpetual patron = licenses.db.patron() - patron.last_loan_activity_sync = utc_now() loan, is_new = license.loan_to(patron) assert license == loan.license assert pool == loan.license_pool assert True == is_new - assert None == patron.last_loan_activity_sync loan2, is_new = license.loan_to(patron) assert loan == loan2 @@ -1125,15 +1123,10 @@ def test_calculate_change_from_one_event(self, db: DatabaseTransactionFixture): def test_loan_to_patron(self, db: DatabaseTransactionFixture): # Test our ability to loan LicensePools to Patrons. - # - # TODO: The path where the LicensePool is loaned to an - # IntegrationClient rather than a Patron is currently not - # directly tested. 
pool = db.licensepool(None)
         patron = db.patron()
         now = utc_now()
-        patron.last_loan_activity_sync = now
 
         yesterday = now - datetime.timedelta(days=1)
         tomorrow = now + datetime.timedelta(days=1)
@@ -1148,7 +1141,7 @@ def test_loan_to_patron(self, db: DatabaseTransactionFixture):
             external_identifier=external_identifier,
         )
 
-        assert True == is_new
+        assert is_new is True
         assert isinstance(loan, Loan)
         assert pool == loan.license_pool
         assert patron == loan.patron
@@ -1157,16 +1150,7 @@
         assert fulfillment == loan.fulfillment
         assert external_identifier == loan.external_identifier
 
-        # Issuing a loan locally created uncertainty about a patron's
-        # loans, since we don't know how the external vendor dealt
-        # with the request. The last_loan_activity_sync has been
-        # cleared out so we know to check back with the source of
-        # truth.
-        assert None == patron.last_loan_activity_sync
-
-        # 'Creating' a loan that already exists does not create any
-        # uncertainty.
-        patron.last_loan_activity_sync = now
+        # 'Creating' a loan that already exists returns the existing loan.
         loan2, is_new = pool.loan_to(
             patron,
             start=yesterday,
@@ -1174,20 +1158,15 @@
             fulfillment=fulfillment,
             external_identifier=external_identifier,
         )
-        assert False == is_new
+        assert is_new is False
         assert loan == loan2
-        assert now == patron.last_loan_activity_sync
 
     def test_on_hold_to_patron(self, db: DatabaseTransactionFixture):
         # Test our ability to put a Patron in the holds queue for a LicensePool.
-        #
-        # TODO: The path where the 'patron' is an IntegrationClient
-        # rather than a Patron is currently not directly tested.
pool = db.licensepool(None) patron = db.patron() now = utc_now() - patron.last_loan_activity_sync = now yesterday = now - datetime.timedelta(days=1) tomorrow = now + datetime.timedelta(days=1) @@ -1203,7 +1182,7 @@ def test_on_hold_to_patron(self, db: DatabaseTransactionFixture): external_identifier=external_identifier, ) - assert True == is_new + assert is_new is True assert isinstance(hold, Hold) assert pool == hold.license_pool assert patron == hold.patron @@ -1212,16 +1191,7 @@ def test_on_hold_to_patron(self, db: DatabaseTransactionFixture): assert position == hold.position assert external_identifier == hold.external_identifier - # Issuing a hold locally created uncertainty about a patron's - # loans, since we don't know how the external vendor dealt - # with the request. The last_loan_activity_sync has been - # cleared out so we know to check back with the source of - # truth. - assert None == patron.last_loan_activity_sync - - # 'Creating' a hold that already exists does not create any - # uncertainty. - patron.last_loan_activity_sync = now + # 'Creating' a hold that already exists returns the existing hold. 
hold2, is_new = pool.on_hold_to( patron, start=yesterday, @@ -1229,9 +1199,8 @@ def test_on_hold_to_patron(self, db: DatabaseTransactionFixture): position=position, external_identifier=external_identifier, ) - assert False == is_new + assert is_new is False assert hold == hold2 - assert now == patron.last_loan_activity_sync class TestLicensePoolDeliveryMechanism: diff --git a/tests/manager/sqlalchemy/model/test_patron.py b/tests/manager/sqlalchemy/model/test_patron.py index 3d1d9f91bb..506f0f5956 100644 --- a/tests/manager/sqlalchemy/model/test_patron.py +++ b/tests/manager/sqlalchemy/model/test_patron.py @@ -364,6 +364,19 @@ def test_repr(self, db: DatabaseTransactionFixture): == repr(patron) ) + def test_redis_key(self, db: DatabaseTransactionFixture): + patron = db.patron(external_identifier="a patron") + assert patron.redis_key() == f"Patron::{patron.id}" + + # If we know the patron's ID, we can get the key without a database query. + assert Patron.redis_key_from_id(patron.id) == patron.redis_key() + + # If the patron has no id, we raise an exception. + patron_no_id = Patron() + with pytest.raises(TypeError) as excinfo: + patron_no_id.redis_key() + assert "Patron must have an id to generate a redis key." in str(excinfo.value) + def test_identifier_to_remote_service(self, db: DatabaseTransactionFixture): # Here's a patron. patron = db.patron() @@ -489,33 +502,6 @@ def test_cascade_delete(self, db: DatabaseTransactionFixture): assert db.session.query(Annotation).all() == [] assert db.session.query(Credential).all() == [] - def test_loan_activity_max_age(self, db: DatabaseTransactionFixture): - # Currently, patron.loan_activity_max_age is a constant - # and cannot be changed. - assert 15 * 60 == db.patron().loan_activity_max_age - - def test_last_loan_activity_sync(self, db: DatabaseTransactionFixture): - # Verify that last_loan_activity_sync is cleared out - # beyond a certain point. 
- patron = db.patron() - now = utc_now() - max_age = patron.loan_activity_max_age - recently = now - datetime.timedelta(seconds=max_age / 2) - long_ago = now - datetime.timedelta(seconds=max_age * 2) - - # So long as last_loan_activity_sync is relatively recent, - # it's treated as a normal piece of data. - patron.last_loan_activity_sync = recently - assert recently == patron._last_loan_activity_sync - assert recently == patron.last_loan_activity_sync - - # If it's _not_ relatively recent, attempting to access it - # doesn't clear it out, but accessor returns None - patron.last_loan_activity_sync = long_ago - assert long_ago == patron._last_loan_activity_sync - assert None == patron.last_loan_activity_sync - assert long_ago == patron._last_loan_activity_sync - def test_root_lane(self, db: DatabaseTransactionFixture): root_1 = db.lane() root_2 = db.lane() diff --git a/tests/mocks/circulation.py b/tests/mocks/circulation.py index 84b6b1a90c..e2e0657f14 100644 --- a/tests/mocks/circulation.py +++ b/tests/mocks/circulation.py @@ -29,7 +29,7 @@ from palace.manager.sqlalchemy.model.patron import Patron -class MockPatronActivityCirculationAPI(PatronActivityCirculationAPI): +class MockBaseCirculationAPI(BaseCirculationAPI): def __init__( self, _db: Session, @@ -96,12 +96,6 @@ def fulfill( # Should be a FulfillmentInfo. return self._return_or_raise("fulfill") - def patron_activity( - self, patron: Patron, pin: str | None - ) -> Iterable[LoanInfo | HoldInfo]: - # This method isn't used on the mock, so we just raise an exception. - raise NotImplementedError() - def checkin(self, patron: Patron, pin: str, licensepool: LicensePool) -> None: # Return value is not checked. 
return self._return_or_raise("checkin") @@ -145,27 +139,7 @@ def __init__( analytics: Analytics | None = None, ): super().__init__(db, library, library_collection_apis, analytics=analytics) - self.remote_loans: list[LoanInfo] = [] - self.remote_holds: list[HoldInfo] = [] - self.remotes: dict[str, MockPatronActivityCirculationAPI] = {} - - def add_remote_loan( - self, - loan: LoanInfo, - ) -> None: - self.remote_loans.append(loan) - - def add_remote_hold( - self, - hold: HoldInfo, - ) -> None: - self.remote_holds.append(hold) - - def patron_activity( - self, patron: Patron, pin: str | None - ) -> tuple[list[LoanInfo], list[HoldInfo], bool]: - """Return a 3-tuple (loans, holds, completeness).""" - return self.remote_loans, self.remote_holds, True + self.remotes: dict[str, MockBaseCirculationAPI] = {} def queue_checkout( self, licensepool: LicensePool, response: LoanInfo | HoldInfo | Exception @@ -197,9 +171,7 @@ def queue_release_hold( api = self.api_for_license_pool(licensepool) api.queue_release_hold(response) - def api_for_license_pool( - self, licensepool: LicensePool - ) -> MockPatronActivityCirculationAPI: + def api_for_license_pool(self, licensepool: LicensePool) -> MockBaseCirculationAPI: source = licensepool.data_source.name assert source is not None if source not in self.remotes: @@ -209,7 +181,7 @@ def api_for_license_pool( set_delivery_mechanism_at = BaseCirculationAPI.BORROW_STEP if source == DataSource.THREEM: can_revoke_hold_when_reserved = False - remote = MockPatronActivityCirculationAPI( + remote = MockBaseCirculationAPI( self._db, licensepool.collection, set_delivery_mechanism_at, @@ -218,6 +190,39 @@ def api_for_license_pool( self.remotes[source] = remote return self.remotes[source] + def add_remote_api( + self, licensepool: LicensePool, api: MockBaseCirculationAPI + ) -> None: + source = licensepool.data_source.name + assert source is not None + self.remotes[source] = api + + +class MockPatronActivityCirculationAPI( + MockBaseCirculationAPI, 
PatronActivityCirculationAPI +): + def __init__(self, _db: Session, collection: Collection): + super().__init__(_db, collection) + self.remote_loans: list[LoanInfo] = [] + self.remote_holds: list[HoldInfo] = [] + self.patron_activity_calls: list[tuple[Patron, str | None]] = [] + + def add_remote_loan( + self, + loan: LoanInfo, + ) -> None: + self.remote_loans.append(loan) + + def add_remote_hold(self, hold: HoldInfo) -> None: + self.remote_holds.append(hold) + + def patron_activity( + self, patron: Patron, pin: str | None + ) -> Iterable[LoanInfo | HoldInfo]: + self.patron_activity_calls.append((patron, pin)) + yield from self.remote_loans + yield from self.remote_holds + class MockCirculationManager(CirculationManager): d_circulation: MockCirculationAPI diff --git a/tox.ini b/tox.ini index 9dba423c89..b0b53b0811 100644 --- a/tox.ini +++ b/tox.ini @@ -19,10 +19,12 @@ setenv = docker: PALACE_TEST_MINIO_URL_SCHEME=http docker: PALACE_TEST_MINIO_USER=palace docker: PALACE_TEST_MINIO_PASSWORD=12345678901234567890 + docker: PALACE_TEST_REDIS_URL_SCHEME=redis docker = docker: os-circ docker: db-circ docker: minio-circ + docker: redis-circ allowlist_externals = python poetry @@ -71,6 +73,13 @@ expose = host_var = PALACE_TEST_MINIO_URL_HOST +[docker:redis-circ] +image = redis:7 +expose = + PALACE_TEST_REDIS_URL_PORT=6379/tcp +host_var = + PALACE_TEST_REDIS_URL_HOST + [gh-actions] python = 3.10: py310