diff --git a/README.md b/README.md index 5c0c1402d7..2589105bd5 100644 --- a/README.md +++ b/README.md @@ -146,7 +146,7 @@ grant all privileges on database circ to palace; ### Redis -Redis is used as the broker for Celery. You can run Redis with docker using the following command: +Redis is used as the broker for Celery and the caching layer. You can run Redis with docker using the following command: ```sh docker run -d --name redis -p 6379:6379 redis ``` @@ -193,6 +193,19 @@ We support overriding a number of other Celery settings via environment variable the defaults should be sufficient. The full list of settings can be found in [`service/celery/configuration.py`](src/palace/manager/service/celery/configuration.py). +#### Redis + +We use Redis as the caching layer for the application. Although you can use the same Redis database for both +Celery and caching, we recommend that you use a separate database for each purpose to avoid conflicts. + +- `PALACE_REDIS_URL`: The URL of the Redis instance to use for caching. (**required**). + - for example: + ```sh + export PALACE_REDIS_URL="redis://localhost:6379/1" + ``` +- `PALACE_REDIS_KEY_PREFIX`: The prefix to use for keys stored in the Redis instance. The default is `palace`. + This is useful if you want to use the same Redis database for multiple CMs (optional). + +#### General - `PALACE_BASE_URL`: The base URL of the application. Used to create absolute links.
(optional) diff --git a/docker-compose.yml b/docker-compose.yml index 9fbd5deee3..f9e801d4a2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,6 +20,7 @@ x-cm-variables: &cm PALACE_CELERY_BROKER_URL: "redis://redis:6379/0" PALACE_CELERY_BROKER_TRANSPORT_OPTIONS_GLOBAL_KEYPREFIX: "test" PALACE_CELERY_CLOUDWATCH_STATISTICS_DRYRUN: "true" + PALACE_REDIS_URL: "redis://redis:6379/1" # Set up the environment variables used for testing as well PALACE_TEST_DATABASE_URL: "postgresql://palace:test@pg:5432/circ" @@ -27,6 +28,7 @@ x-cm-variables: &cm PALACE_TEST_MINIO_URL: "http://minio:9000" PALACE_TEST_MINIO_USER: "palace" PALACE_TEST_MINIO_PASSWORD: "test123456789" + PALACE_TEST_REDIS_URL: "redis://redis:6379/2" depends_on: pg: condition: service_healthy diff --git a/poetry.lock b/poetry.lock index f27592b06d..60a93b41db 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. 
[[package]] name = "alembic" @@ -1530,8 +1530,6 @@ files = [ {file = "frozendict-2.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d13b4310db337f4d2103867c5a05090b22bc4d50ca842093779ef541ea9c9eea"}, {file = "frozendict-2.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:b3b967d5065872e27b06f785a80c0ed0a45d1f7c9b85223da05358e734d858ca"}, {file = "frozendict-2.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:4ae8d05c8d0b6134bfb6bfb369d5fa0c4df21eabb5ca7f645af95fdc6689678e"}, - {file = "frozendict-2.4.4-py311-none-any.whl", hash = "sha256:705efca8d74d3facbb6ace80ab3afdd28eb8a237bfb4063ed89996b024bc443d"}, - {file = "frozendict-2.4.4-py312-none-any.whl", hash = "sha256:d9647563e76adb05b7cde2172403123380871360a114f546b4ae1704510801e5"}, {file = "frozendict-2.4.4.tar.gz", hash = "sha256:3f7c031b26e4ee6a3f786ceb5e3abf1181c4ade92dce1f847da26ea2c96008c7"}, ] @@ -3961,13 +3959,13 @@ full = ["numpy"] [[package]] name = "redis" -version = "5.0.4" +version = "5.0.5" description = "Python client for Redis database and key-value store" optional = false python-versions = ">=3.7" files = [ - {file = "redis-5.0.4-py3-none-any.whl", hash = "sha256:7adc2835c7a9b5033b7ad8f8918d09b7344188228809c98df07af226d39dec91"}, - {file = "redis-5.0.4.tar.gz", hash = "sha256:ec31f2ed9675cc54c21ba854cfe0462e6faf1d83c8ce5944709db8a4700b9c61"}, + {file = "redis-5.0.5-py3-none-any.whl", hash = "sha256:30b47d4ebb6b7a0b9b40c1275a19b87bb6f46b3bed82a89012cf56dea4024ada"}, + {file = "redis-5.0.5.tar.gz", hash = "sha256:3417688621acf6ee368dec4a04dd95881be24efd34c79f00d31f62bb528800ae"}, ] [package.dependencies] @@ -4684,6 +4682,21 @@ files = [ {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, ] +[[package]] +name = "types-redis" +version = "4.6.0.20240425" +description = "Typing stubs for redis" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-redis-4.6.0.20240425.tar.gz", hash = 
"sha256:9402a10ee931d241fdfcc04592ebf7a661d7bb92a8dea631279f0d8acbcf3a22"}, + {file = "types_redis-4.6.0.20240425-py3-none-any.whl", hash = "sha256:ac5bc19e8f5997b9e76ad5d9cf15d0392d9f28cf5fc7746ea4a64b989c45c6a8"}, +] + +[package.dependencies] +cryptography = ">=35.0.0" +types-pyOpenSSL = "*" + [[package]] name = "types-requests" version = "2.31.0.6" @@ -5038,4 +5051,4 @@ lxml = ">=3.8" [metadata] lock-version = "2.0" python-versions = ">=3.10,<4" -content-hash = "5538037fe8bd607d0278f5688a9aa877e73140a647928f87b0f30f7823f8adce" +content-hash = "6926c293e113bb1539170fb512788115a9946d1c74d0dbd439d63fdbf2805198" diff --git a/pyproject.toml b/pyproject.toml index 48610f8419..67445dda30 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -263,6 +263,7 @@ python-dateutil = "2.9.0.post0" python3-saml = "^1.16" # python-saml is required for SAML authentication pytz = "^2023.3" pyyaml = "^6.0" +redis = "^5.0.5" redmail = "^0.6.0" requests = "^2.29" sqlalchemy = {version = "^1.4", extras = ["mypy"]} @@ -308,6 +309,7 @@ types-Pillow = "^10.0.0" types-psycopg2 = "^2.9.21" types-python-dateutil = "^2.8.19" types-pytz = "^2024.1" +types-redis = "^4.6.0.20240425" types-requests = "^2.28.11" [tool.poetry.group.pg] diff --git a/src/palace/manager/celery/tasks/patron_activity.py b/src/palace/manager/celery/tasks/patron_activity.py new file mode 100644 index 0000000000..e07082a912 --- /dev/null +++ b/src/palace/manager/celery/tasks/patron_activity.py @@ -0,0 +1,183 @@ +# def sync_bookshelf( +# self, patron: Patron, pin: str | None, force: bool = False +# ) -> tuple[list[Loan] | Query[Loan], list[Hold] | Query[Hold]]: +# """Sync our internal model of a patron's bookshelf with any external +# vendors that provide books to the patron's library. +# +# :param patron: A Patron. +# :param pin: The password authenticating the patron; used by some vendors +# that perform a cross-check against the library ILS. 
+# :param force: If this is True, the method will call out to external +# vendors even if it looks like the system has up-to-date information +# about the patron. +# """ +# # Get our internal view of the patron's current state. +# local_loans = self.local_loans(patron) +# local_holds = self.local_holds(patron) +# +# if patron and patron.last_loan_activity_sync and not force: +# # Our local data is considered fresh, so we can return it +# # without calling out to the vendor APIs. +# return local_loans, local_holds +# +# # Assuming everything goes well, we will set +# # Patron.last_loan_activity_sync to this value -- the moment +# # just before we started contacting the vendor APIs. +# last_loan_activity_sync: datetime.datetime | None = utc_now() +# +# # Update the external view of the patron's current state. +# remote_loans, remote_holds, complete = self.patron_activity(patron, pin) +# __transaction = self._db.begin_nested() +# +# if not complete: +# # We were not able to get a complete picture of the +# # patron's loan activity. Until we are able to do that, we +# # should never assume that our internal model of the +# # patron's loans is good enough to cache. 
+# last_loan_activity_sync = None +# +# now = utc_now() +# local_loans_by_identifier = {} +# local_holds_by_identifier = {} +# for l in local_loans: +# if not l.license_pool: +# self.log.error("Active loan with no license pool!") +# continue +# i = l.license_pool.identifier +# if not i: +# self.log.error( +# "Active loan on license pool %s, which has no identifier!", +# l.license_pool, +# ) +# continue +# key = (i.type, i.identifier) +# local_loans_by_identifier[key] = l +# for h in local_holds: +# if not h.license_pool: +# self.log.error("Active hold with no license pool!") +# continue +# i = h.license_pool.identifier +# if not i: +# self.log.error( +# "Active hold on license pool %r, which has no identifier!", +# h.license_pool, +# ) +# continue +# key = (i.type, i.identifier) +# local_holds_by_identifier[key] = h +# +# active_loans = [] +# active_holds = [] +# start: datetime.datetime | None +# end: datetime.datetime | None +# for loan in remote_loans: +# # This is a remote loan. Find or create the corresponding +# # local loan. +# pool = loan.license_pool(self._db) +# start = loan.start_date +# end = loan.end_date +# key = (loan.identifier_type, loan.identifier) +# if key in local_loans_by_identifier: +# # We already have the Loan object, we don't need to look +# # it up again. +# local_loan = local_loans_by_identifier[key] +# +# # But maybe the remote's opinions as to the loan's +# # start or end date have changed. +# if start: +# local_loan.start = start +# if end: +# local_loan.end = end +# else: +# local_loan, new = pool.loan_to(patron, start, end) +# +# if loan.locked_to: +# # The loan source is letting us know that the loan is +# # locked to a specific delivery mechanism. Even if +# # this is the first we've heard of this loan, +# # it may have been created in another app or through +# # a library-website integration. 
+# loan.locked_to.apply(local_loan, autocommit=False) +# active_loans.append(local_loan) +# +# # Check the local loan off the list we're keeping so we +# # don't delete it later. +# key = (loan.identifier_type, loan.identifier) +# if key in local_loans_by_identifier: +# del local_loans_by_identifier[key] +# +# for hold in remote_holds: +# # This is a remote hold. Find or create the corresponding +# # local hold. +# pool = hold.license_pool(self._db) +# start = hold.start_date +# end = hold.end_date +# position = hold.hold_position +# key = (hold.identifier_type, hold.identifier) +# if key in local_holds_by_identifier: +# # We already have the Hold object, we don't need to look +# # it up again. +# local_hold = local_holds_by_identifier[key] +# +# # But maybe the remote's opinions as to the hold's +# # start or end date have changed. +# local_hold.update(start, end, position) +# else: +# local_hold, new = pool.on_hold_to(patron, start, end, position) +# active_holds.append(local_hold) +# +# # Check the local hold off the list we're keeping so that +# # we don't delete it later. +# if key in local_holds_by_identifier: +# del local_holds_by_identifier[key] +# +# # We only want to delete local loans and holds if we were able to +# # successfully sync with all the providers. If there was an error, +# # the provider might still know about a loan or hold that we don't +# # have in the remote lists. +# if complete: +# # Every loan remaining in loans_by_identifier is a hold that +# # the provider doesn't know about. This usually means it's expired +# # and we should get rid of it, but it's possible the patron is +# # borrowing a book and syncing their bookshelf at the same time, +# # and the local loan was created after we got the remote loans. +# # If the loan's start date is less than a minute ago, we'll keep it. 
+# for local_loan in list(local_loans_by_identifier.values()): +# if ( +# local_loan.license_pool.collection_id +# in self.collection_ids_for_sync +# ): +# one_minute_ago = utc_now() - datetime.timedelta(minutes=1) +# if local_loan.start is None or local_loan.start < one_minute_ago: +# logging.info( +# "In sync_bookshelf for patron %s, deleting loan %s (patron %s)" +# % ( +# patron.authorization_identifier, +# str(local_loan.id), +# local_loan.patron.authorization_identifier, +# ) +# ) +# self._db.delete(local_loan) +# else: +# logging.info( +# "In sync_bookshelf for patron %s, found local loan %s created in the past minute that wasn't in remote loans" +# % (patron.authorization_identifier, str(local_loan.id)) +# ) +# +# # Every hold remaining in holds_by_identifier is a hold that +# # the provider doesn't know about, which means it's expired +# # and we should get rid of it. +# for local_hold in list(local_holds_by_identifier.values()): +# if ( +# local_hold.license_pool.collection_id +# in self.collection_ids_for_sync +# ): +# self._db.delete(local_hold) +# +# # Now that we're in sync (or not), set last_loan_activity_sync +# # to the conservative value obtained earlier. 
+# if patron: +# patron.last_loan_activity_sync = last_loan_activity_sync +# +# __transaction.commit() +# return active_loans, active_holds diff --git a/src/palace/manager/service/container.py b/src/palace/manager/service/container.py index 2f5041e441..10d92c20ee 100644 --- a/src/palace/manager/service/container.py +++ b/src/palace/manager/service/container.py @@ -15,6 +15,8 @@ ) from palace.manager.service.logging.configuration import LoggingConfiguration from palace.manager.service.logging.container import Logging +from palace.manager.service.redis.configuration import RedisConfiguration +from palace.manager.service.redis.container import RedisContainer from palace.manager.service.search.configuration import SearchConfiguration from palace.manager.service.search.container import Search from palace.manager.service.sitewide import SitewideConfiguration @@ -65,6 +67,11 @@ class Services(DeclarativeContainer): IntegrationRegistryContainer, ) + redis = Container( + RedisContainer, + config=config.redis, + ) + def wire_container(container: Services) -> None: container.wire( @@ -97,6 +104,7 @@ def create_container() -> Services: "email": EmailConfiguration().dict(), "celery": CeleryConfiguration().dict(), "fcm": FcmConfiguration().dict(), + "redis": RedisConfiguration().dict(), } ) wire_container(container) diff --git a/src/palace/manager/service/redis/__init__.py b/src/palace/manager/service/redis/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/palace/manager/service/redis/configuration.py b/src/palace/manager/service/redis/configuration.py new file mode 100644 index 0000000000..2e1b495a1d --- /dev/null +++ b/src/palace/manager/service/redis/configuration.py @@ -0,0 +1,11 @@ +from pydantic import RedisDsn + +from palace.manager.service.configuration import ServiceConfiguration + + +class RedisConfiguration(ServiceConfiguration): + url: RedisDsn + key_prefix: str = "palace" + + class Config: + env_prefix = "PALACE_REDIS_" diff --git 
a/src/palace/manager/service/redis/container.py b/src/palace/manager/service/redis/container.py new file mode 100644 index 0000000000..1bbef5cfb4 --- /dev/null +++ b/src/palace/manager/service/redis/container.py @@ -0,0 +1,22 @@ +import redis +from dependency_injector import providers +from dependency_injector.containers import DeclarativeContainer + +from palace.manager.service.redis.key import RedisKeyGenerator +from palace.manager.service.redis.redis import Redis + + +class RedisContainer(DeclarativeContainer): + config = providers.Configuration() + + connection_pool: providers.Provider[redis.ConnectionPool] = providers.Singleton( + redis.ConnectionPool.from_url, url=config.url, decode_responses=True + ) + + key_generator: providers.Provider[RedisKeyGenerator] = providers.Singleton( + RedisKeyGenerator, prefix=config.key_prefix + ) + + client: providers.Provider[Redis] = providers.Singleton( + Redis, connection_pool=connection_pool, key_generator=key_generator + ) diff --git a/src/palace/manager/service/redis/exception.py b/src/palace/manager/service/redis/exception.py new file mode 100644 index 0000000000..4159e6aca9 --- /dev/null +++ b/src/palace/manager/service/redis/exception.py @@ -0,0 +1,9 @@ +from palace.manager.core.exceptions import BasePalaceException + + +class RedisKeyError(BasePalaceException, TypeError): + ... + + +class RedisValueError(BasePalaceException, ValueError): + ... diff --git a/src/palace/manager/service/redis/key.py b/src/palace/manager/service/redis/key.py new file mode 100644 index 0000000000..ba83048764 --- /dev/null +++ b/src/palace/manager/service/redis/key.py @@ -0,0 +1,32 @@ +from typing import Protocol, runtime_checkable + +from palace.manager.service.redis.exception import RedisKeyError + + +@runtime_checkable +class SupportsRedisKey(Protocol): + def __redis_key__(self) -> str: + ... 
+ + +class RedisKeyGenerator: + SEPERATOR = "::" + + def __init__(self, prefix: str): + self.prefix = prefix + + def _stringify(self, key: SupportsRedisKey | str | int) -> str: + if isinstance(key, SupportsRedisKey): + return key.__redis_key__() + elif isinstance(key, str): + return key + elif isinstance(key, int): + return str(key) + else: + raise RedisKeyError( + f"Unsupported key type: {key} ({key.__class__.__name__})" + ) + + def __call__(self, *args: SupportsRedisKey | str | int) -> str: + key_strings = [self._stringify(k) for k in args] + return self.SEPERATOR.join([self.prefix, *key_strings]) diff --git a/src/palace/manager/service/redis/models/__init__.py b/src/palace/manager/service/redis/models/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/palace/manager/service/redis/models/patron_activity.py b/src/palace/manager/service/redis/models/patron_activity.py new file mode 100644 index 0000000000..86f3991b44 --- /dev/null +++ b/src/palace/manager/service/redis/models/patron_activity.py @@ -0,0 +1,114 @@ +from datetime import datetime +from enum import auto +from functools import cached_property +from types import TracebackType +from typing import Literal + +from backports.strenum import StrEnum + +from palace.manager.core.exceptions import BasePalaceException +from palace.manager.service.redis.redis import Redis +from palace.manager.sqlalchemy.model.collection import Collection +from palace.manager.sqlalchemy.model.patron import Patron +from palace.manager.util.datetime_helpers import utc_now + + +class PatronActivityError(BasePalaceException, RuntimeError): + ... + + +class PatronActivitySync: + class Status(StrEnum): + """The status of a patron activity sync task.""" + + # The task is currently running. + IN_PROGRESS = auto() + + # The task failed to complete. 
+ FAILED = auto() + + IN_PROGRESS_TIMEOUT = 60 * 15 # 15 minutes + FAILED_TIMEOUT = 60 * 60 * 4 # 4 hours + SUCCESS_TIMEOUT = 60 * 60 * 12 # 12 hours + + UPDATE_SCRIPT = """ + if redis.call('GET', KEYS[1]) == ARGV[1] then + return redis.call('SET', KEYS[1], ARGV[2], 'XX', 'EX', ARGV[3]) + else + return nil + end + """ + + def __init__(self, redis_client: Redis, patron: Patron, collection: Collection): + self._redis_client = redis_client + self._patron = patron + self._collection = collection + self._update_script = self._redis_client.register_script(self.UPDATE_SCRIPT) + self._in_context_manager = False + self._context_manager_acquired = False + + @cached_property + def key(self) -> str: + return self._redis_client.get_key( + "PatronActivity", self._patron, self._collection + ) + + def status(self) -> datetime | Status | None: + status = self._redis_client.get(self.key) + if status is None: + return None + + if status in list(self.Status): + return self.Status(status) + + return datetime.fromisoformat(status) + + def acquire(self) -> bool: + acquired = self._redis_client.set( + self.key, + self.Status.IN_PROGRESS, + nx=True, + ex=self.IN_PROGRESS_TIMEOUT, + ) + return acquired is not None + + def clear(self) -> bool: + return self._redis_client.delete(self.key) == 1 + + def _update(self, status: str, timeout: int) -> bool: + value_set = self._update_script( + keys=[self.key], + args=[self.Status.IN_PROGRESS, status, timeout], + ) + return value_set is not None + + def complete(self, timestamp: datetime | None = None) -> bool: + timestamp = timestamp or utc_now() + return self._update(timestamp.isoformat(), self.SUCCESS_TIMEOUT) + + def fail(self) -> bool: + return self._update(self.Status.FAILED, self.FAILED_TIMEOUT) + + def __enter__(self) -> bool: + if self._in_context_manager: + raise PatronActivityError(f"Cannot nest {self.__class__.__name__}.") + self._in_context_manager = True + acquired = self.acquire() + self._context_manager_acquired = acquired + return 
acquired + + def __exit__( + self, + exctype: type[BaseException] | None, + excinst: BaseException | None, + exctb: TracebackType | None, + ) -> Literal[False]: + if self._context_manager_acquired: + if exctype is not None: + self.fail() + else: + self.complete() + + self._in_context_manager = False + self._context_manager_acquired = False + return False diff --git a/src/palace/manager/service/redis/redis.py b/src/palace/manager/service/redis/redis.py new file mode 100644 index 0000000000..ec07ff6891 --- /dev/null +++ b/src/palace/manager/service/redis/redis.py @@ -0,0 +1,101 @@ +from abc import ABC, abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any + +import redis + +from palace.manager.service.redis.exception import RedisValueError +from palace.manager.service.redis.key import RedisKeyGenerator + +# We do this right now because we are using types-redis, which defines Redis as a generic type, +# even though it is not actually generic. Redis 5 now supports type hints natively, so we +# should be able to drop types-redis in the future. +# Currently, the built-in type hints are incomplete at best, so types-redis does a better job. +# This GitHub issue is tracking the progress of the biggest blocker we have for using the +# built-in type hints: +# https://github.com/redis/redis-py/issues/2399 +if TYPE_CHECKING: + RedisClient = redis.Redis[str] +else: + RedisClient = redis.Redis + + +class RedisCommandArgsBase(ABC): + def __init__(self, name: str): + self.name = name + + @abstractmethod + def key_args(self, args: list[Any]) -> Sequence[str]: + ...
+ + +class RedisCommandArgs(RedisCommandArgsBase): + def __init__(self, name: str, *, args_start: int = 0, args_end: int | None = 1): + super().__init__(name) + self.args_start = args_start + self.args_end = args_end + + def key_args(self, args: list[Any]) -> Sequence[str]: + return [str(arg) for arg in args[self.args_start : self.args_end]] + + +class RedisVariableCommandArgs(RedisCommandArgsBase): + def __init__(self, name: str, *, key_index: int = 0): + super().__init__(name) + self.key_index = key_index + + def key_args(self, args: list[Any]) -> Sequence[str]: + keys = int(args[self.key_index]) + args_start = self.key_index + 1 + return [str(arg) for arg in args[args_start : args_start + keys]] + + +class RedisCommandNoArgs(RedisCommandArgsBase): + def key_args(self, args: list[Any]) -> Sequence[str]: + return [] + + +class Redis(RedisClient): + """ + A subclass of redis.Redis that adds the ability to check that keys are prefixed correctly. + + Some inspiration for this was taken from Kombu's Redis class. 
See: + https://github.com/celery/kombu/pull/1349 + """ + + PREFIXED_COMMANDS = [ + RedisCommandNoArgs("SCRIPT LOAD"), + RedisCommandArgs("KEYS"), + RedisCommandArgs("GET"), + RedisCommandArgs("SET"), + RedisCommandArgs("TTL"), + RedisCommandArgs("DEL", args_end=None), + RedisCommandArgs("EXPIRETIME"), + RedisVariableCommandArgs("EVALSHA", key_index=1), + ] + + def __init__(self, *args: Any, key_generator: RedisKeyGenerator, **kwargs: Any): + super().__init__(*args, **kwargs) + self.get_key: RedisKeyGenerator = key_generator + self.prefixed_lookup = {cmd.name: cmd for cmd in self.PREFIXED_COMMANDS} + self.auto_close_connection_pool = True + + def _check_prefix(self, *args: Any) -> None: + arg_list = list(args) + command = arg_list.pop(0) + prefix = self.get_key() + cmd_args = self.prefixed_lookup.get(command) + if cmd_args is not None: + for key in cmd_args.key_args(arg_list): + if not key.startswith(prefix): + raise RedisValueError( + f"Key {key} does not start with prefix {prefix}. Command {command} args: {arg_list}" + ) + else: + raise RedisValueError( + f"Command {command} is not checked for prefix. 
Args: {arg_list}" + ) + + def execute_command(self, *args: Any, **options: Any) -> Any: + self._check_prefix(*args) + return super().execute_command(*args, **options) diff --git a/src/palace/manager/sqlalchemy/model/collection.py b/src/palace/manager/sqlalchemy/model/collection.py index 3504e70b8e..ba09d1b21c 100644 --- a/src/palace/manager/sqlalchemy/model/collection.py +++ b/src/palace/manager/sqlalchemy/model/collection.py @@ -158,6 +158,9 @@ class Collection(Base, HasSessionCache): def __repr__(self) -> str: return f'' + def __redis_key__(self) -> str: + return f"Collection::{self.id}" + def cache_key(self) -> tuple[str | None, str | None]: return self.name, self.integration_configuration.protocol diff --git a/src/palace/manager/sqlalchemy/model/patron.py b/src/palace/manager/sqlalchemy/model/patron.py index fcfee128c1..e0d04867b2 100644 --- a/src/palace/manager/sqlalchemy/model/patron.py +++ b/src/palace/manager/sqlalchemy/model/patron.py @@ -216,6 +216,9 @@ def date(d): date(self.last_external_sync), ) + def __redis_key__(self) -> str: + return f"Patron::{self.id}" + def identifier_to_remote_service(self, remote_data_source, generator=None): """Find or randomly create an identifier to use when identifying this patron to a remote service. 
diff --git a/tests/conftest.py b/tests/conftest.py index e09335a58b..93e3085d3e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -16,6 +16,7 @@ "tests.fixtures.flask", "tests.fixtures.library", "tests.fixtures.odl", + "tests.fixtures.redis", "tests.fixtures.s3", "tests.fixtures.search", "tests.fixtures.services", diff --git a/tests/fixtures/redis.py b/tests/fixtures/redis.py new file mode 100644 index 0000000000..68122ead74 --- /dev/null +++ b/tests/fixtures/redis.py @@ -0,0 +1,55 @@ +from collections.abc import Generator +from contextlib import contextmanager + +import pytest +from pydantic import RedisDsn +from typing_extensions import Self + +from palace.manager.service.redis.redis import Redis +from tests.fixtures.config import FixtureTestUrlConfiguration +from tests.fixtures.database import TestIdFixture +from tests.fixtures.services import ServicesFixture + + +class RedisTestConfiguration(FixtureTestUrlConfiguration): + url: RedisDsn + + class Config: + env_prefix = "PALACE_TEST_REDIS_" + + +class RedisFixture: + def __init__(self, test_id: TestIdFixture, services_fixture: ServicesFixture): + self.test_id = test_id + self.services_fixture = services_fixture + self.config = RedisTestConfiguration.from_env() + + self.key_prefix = f"test::{self.test_id.id}" + self.services_fixture.services.config.from_dict( + {"redis": {"url": self.config.url, "key_prefix": self.key_prefix}} + ) + self.client: Redis = self.services_fixture.services.redis.client() + + def close(self): + keys = self.client.keys(f"{self.key_prefix}*") + if keys: + self.client.delete(*keys) + + @classmethod + @contextmanager + def fixture( + cls, test_id: TestIdFixture, services_fixture: ServicesFixture + ) -> Generator[Self, None, None]: + fixture = cls(test_id, services_fixture) + try: + yield fixture + finally: + fixture.close() + + +@pytest.fixture(scope="function") +def redis_fixture( + function_test_id: TestIdFixture, services_fixture: ServicesFixture +) -> Generator[RedisFixture, 
None, None]: + with RedisFixture.fixture(function_test_id, services_fixture) as fixture: + yield fixture diff --git a/tests/manager/service/redis/__init__.py b/tests/manager/service/redis/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/manager/service/redis/models/__init__.py b/tests/manager/service/redis/models/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/manager/service/redis/models/test_patron_activity.py b/tests/manager/service/redis/models/test_patron_activity.py new file mode 100644 index 0000000000..d0e3e1bd2a --- /dev/null +++ b/tests/manager/service/redis/models/test_patron_activity.py @@ -0,0 +1,208 @@ +import datetime + +import pytest +from freezegun import freeze_time + +from palace.manager.service.redis.models.patron_activity import ( + PatronActivityError, + PatronActivitySync, +) +from palace.manager.util.datetime_helpers import utc_now +from tests.fixtures.database import DatabaseTransactionFixture +from tests.fixtures.redis import RedisFixture + + +class PatronActivitySyncFixture: + def __init__(self, db: DatabaseTransactionFixture, redis_fixture: RedisFixture): + self._db = db + self._redis = redis_fixture + + self.patron = db.patron() + self.collection = db.collection() + self.patron_activity = PatronActivitySync( + redis_fixture.client, self.patron, self.collection + ) + self.timeout_slop = 5 + + +@pytest.fixture +def patron_activity_sync_fixture( + db: DatabaseTransactionFixture, redis_fixture: RedisFixture +): + return PatronActivitySyncFixture(db, redis_fixture) + + +class TestPatronActivitySync: + def test_key( + self, + patron_activity_sync_fixture: PatronActivitySyncFixture, + redis_fixture: RedisFixture, + ): + key = patron_activity_sync_fixture.patron_activity.key + assert key.startswith(redis_fixture.key_prefix) + patron_key = patron_activity_sync_fixture.patron.__redis_key__() + collection_key = patron_activity_sync_fixture.collection.__redis_key__() + assert 
key.endswith(f"::PatronActivity::{patron_key}::{collection_key}") + + def test_status(self, patron_activity_sync_fixture: PatronActivitySyncFixture): + patron_activity = patron_activity_sync_fixture.patron_activity + + # If no status is set, we should return None + assert patron_activity.status() is None + + # If we set a status, we should be able to retrieve it + assert patron_activity.acquire() is True + assert patron_activity.status() == patron_activity.Status.IN_PROGRESS + + # If we complete the status, we should be able to retrieve the timestamp + timestamp = datetime.datetime(1995, 1, 1, 0, 0, 0) + assert patron_activity.complete(timestamp) is True + assert patron_activity.status() == timestamp + + # If the status gets cleared we will return None + assert patron_activity.clear() is True + assert patron_activity.status() is None + + def test_acquire( + self, + patron_activity_sync_fixture: PatronActivitySyncFixture, + redis_fixture: RedisFixture, + ): + patron_activity = patron_activity_sync_fixture.patron_activity + + # If we acquire the status, we should return True + assert patron_activity.acquire() is True + + # If we try to acquire the status again, we should return False + assert patron_activity.acquire() is False + + # If we clear the status, we should be able to acquire it again + assert patron_activity.clear() is True + assert patron_activity.acquire() is True + + # We set an expiry time for the status, so it will automatically clear. 
+ timeout = patron_activity.IN_PROGRESS_TIMEOUT + min_timeout = timeout - patron_activity_sync_fixture.timeout_slop + max_timeout = timeout + assert ( + min_timeout < redis_fixture.client.ttl(patron_activity.key) <= max_timeout + ) + + def test_clear(self, patron_activity_sync_fixture: PatronActivitySyncFixture): + patron_activity = patron_activity_sync_fixture.patron_activity + + # If we clear a status that doesn't exist, we should return False + assert patron_activity.clear() is False + + # If we acquire a status, we should be able to clear it + assert patron_activity.acquire() is True + assert patron_activity.clear() is True + + # If we try to clear it again, we should return False + assert patron_activity.clear() is False + + def test_complete( + self, + patron_activity_sync_fixture: PatronActivitySyncFixture, + redis_fixture: RedisFixture, + ): + patron_activity = patron_activity_sync_fixture.patron_activity + + # If we try to complete a status that doesn't exist, we should return False + assert patron_activity.complete() is False + + # If we acquire a status, we should be able to complete it. If no timestamp is provided, we should + # use the current time. 
+ assert patron_activity.acquire() is True + test_time = datetime.datetime(1999, 9, 9, 9, 9, 9, tzinfo=datetime.timezone.utc) + with freeze_time(test_time): + assert patron_activity.complete() is True + assert patron_activity.status() == test_time + + # Trying to complete the status again should return False + assert patron_activity.complete() is False + + # Trying to fail a complete status should return False + assert patron_activity.fail() is False + + # We can also provide a timestamp to complete the status + assert patron_activity.clear() is True + assert patron_activity.acquire() is True + assert patron_activity.complete(test_time) is True + assert patron_activity.status() == test_time + + # We set a timeout for the status, so it will automatically clear + timeout = patron_activity.SUCCESS_TIMEOUT + min_timeout = timeout - patron_activity_sync_fixture.timeout_slop + max_timeout = timeout + assert ( + min_timeout < redis_fixture.client.ttl(patron_activity.key) <= max_timeout + ) + + def test_fail( + self, + patron_activity_sync_fixture: PatronActivitySyncFixture, + redis_fixture: RedisFixture, + ): + patron_activity = patron_activity_sync_fixture.patron_activity + + # If we try to fail a status that doesn't exist, we should return False + assert patron_activity.fail() is False + + # If we acquire a status, we should be able to fail it. 
+ assert patron_activity.acquire() is True + assert patron_activity.fail() is True + assert patron_activity.status() == patron_activity.Status.FAILED + + # Trying to fail the status again should return False + assert patron_activity.fail() is False + + # Trying to complete a failed status should return False + assert patron_activity.complete() is False + + # We set a timeout for the status, so it will automatically clear + timeout = patron_activity.FAILED_TIMEOUT + min_timeout = timeout - patron_activity_sync_fixture.timeout_slop + max_timeout = timeout + assert ( + min_timeout < redis_fixture.client.ttl(patron_activity.key) <= max_timeout + ) + + def test_context_manager( + self, patron_activity_sync_fixture: PatronActivitySyncFixture + ): + patron_activity = patron_activity_sync_fixture.patron_activity + + # The context manager should acquire the status when entered and complete it when exited + test_time = utc_now() + with freeze_time(test_time): + with patron_activity as acquired: + assert acquired is True + assert patron_activity.status() == patron_activity.Status.IN_PROGRESS + assert patron_activity.status() == test_time + + # If there is an exception, the status should be failed + patron_activity.clear() + with pytest.raises(Exception): + with patron_activity as acquired: + assert acquired is True + raise Exception() + assert patron_activity.status() == patron_activity.Status.FAILED + + # If the status is already acquired, we should not be able to acquire it again + patron_activity.clear() + patron_activity.acquire() + with patron_activity as acquired: + assert acquired is False + # The context manager should not have changed the status + assert patron_activity.status() == patron_activity.Status.IN_PROGRESS + + # Nesting the context manager should raise an error + patron_activity.clear() + with pytest.raises(PatronActivityError): + with patron_activity as acquired1: + assert acquired1 is True + with patron_activity: + # We should never get here + assert 
False + assert patron_activity.status() == patron_activity.Status.FAILED diff --git a/tests/manager/service/redis/test_key.py b/tests/manager/service/redis/test_key.py new file mode 100644 index 0000000000..30d99107da --- /dev/null +++ b/tests/manager/service/redis/test_key.py @@ -0,0 +1,47 @@ +import pytest + +from palace.manager.service.redis.exception import RedisKeyError +from tests.fixtures.redis import RedisFixture + + +class MockSupportsRedisKey: + def __init__(self, key: str = "test"): + self.key = key + + def __redis_key__(self) -> str: + return self.key + + +class TestRedisKeyGenerator: + def test___call__(self, redis_fixture: RedisFixture): + key_prefix = redis_fixture.key_prefix + key_generator = redis_fixture.client.get_key + sep = key_generator.SEPERATOR + + # No args returns just the key prefix + key = key_generator() + assert key == key_prefix + + # Simple string key + test_key = "test" + key = key_generator(test_key) + + # Key always includes the key prefix and is separated by the RedisKeyGenerator.SEPERATOR + assert key == f"{key_prefix}{sep}{test_key}" + + # Multiple args are all included and separated by the RedisKeyGenerator.SEPERATOR + key = key_generator("test", "key", "generator") + assert key == f"{key_prefix}{sep}test{sep}key{sep}generator" + + # ints are also supported and are converted to strings + key = key_generator(1, 2, 3) + assert key == f"{key_prefix}{sep}1{sep}2{sep}3" + + # SupportsRedisKey objects are supported, and their __redis_key__ method is called to get the key + key = key_generator(MockSupportsRedisKey("test"), MockSupportsRedisKey("key")) + assert key == f"{key_prefix}{sep}test{sep}key" + + # Unsupported types raise a RedisKeyError + with pytest.raises(RedisKeyError) as exc_info: + key_generator([1, 2, 3]) # type: ignore[arg-type] + assert "Unsupported key type: [1, 2, 3] (list)" in str(exc_info.value) diff --git a/tests/manager/service/redis/test_redis.py b/tests/manager/service/redis/test_redis.py new file mode 100644 
index 0000000000..5fd78a6dff --- /dev/null +++ b/tests/manager/service/redis/test_redis.py @@ -0,0 +1,42 @@ +import pytest + +from palace.manager.service.redis.exception import RedisValueError +from tests.fixtures.redis import RedisFixture + + +class TestRedis: + def test_client(self, redis_fixture: RedisFixture): + # Do a bit of basic testing to make sure the client is working + client = redis_fixture.client + key = client.get_key("test") + redis_fixture.client.set(key, "value") + assert redis_fixture.client.get(key) == "value" + + def test_prefix_check(self, redis_fixture: RedisFixture): + # Our version of the redis client checks that keys are prefixed correctly + client = redis_fixture.client + + key = "test" + with pytest.raises(RedisValueError) as exc_info: + client.set(key, "value") + assert ( + f"Key {key} does not start with prefix {redis_fixture.key_prefix}" + in str(exc_info.value) + ) + + # We also handle commands with multiple keys + key1 = client.get_key("test1") + key2 = client.get_key("test2") + key3 = "unprefixed" + + with pytest.raises(RedisValueError) as exc_info: + client.delete(key1, key2, key3) + assert ( + f"Key {key3} does not start with prefix {redis_fixture.key_prefix}" + in str(exc_info.value) + ) + + # If we pass a command that isn't checked for a prefix, we raise an error + with pytest.raises(RedisValueError) as exc_info: + client.execute_command("UNKNOWN", key1) + assert "Command UNKNOWN is not checked for prefix" in str(exc_info.value) diff --git a/tests/manager/sqlalchemy/model/test_collection.py b/tests/manager/sqlalchemy/model/test_collection.py index 39c7b37077..c1034589d3 100644 --- a/tests/manager/sqlalchemy/model/test_collection.py +++ b/tests/manager/sqlalchemy/model/test_collection.py @@ -597,3 +597,9 @@ def test_delete( # We've now deleted every LicensePool created for this test. 
assert 0 == db.session.query(LicensePool).count() assert [] == work2.license_pools + + def test___redis_key__(self, example_collection_fixture: ExampleCollectionFixture): + collection = example_collection_fixture.collection + + # The key is based on the collection's ID. + assert collection.__redis_key__() == f"Collection::{collection.id}" diff --git a/tests/manager/sqlalchemy/model/test_patron.py b/tests/manager/sqlalchemy/model/test_patron.py index 3d1d9f91bb..2e883094b7 100644 --- a/tests/manager/sqlalchemy/model/test_patron.py +++ b/tests/manager/sqlalchemy/model/test_patron.py @@ -364,6 +364,10 @@ def test_repr(self, db: DatabaseTransactionFixture): == repr(patron) ) + def test___redis_key__(self, db: DatabaseTransactionFixture): + patron = db.patron(external_identifier="a patron") + assert patron.__redis_key__() == f"Patron::{patron.id}" + def test_identifier_to_remote_service(self, db: DatabaseTransactionFixture): # Here's a patron. patron = db.patron() diff --git a/tox.ini b/tox.ini index 9dba423c89..b0b53b0811 100644 --- a/tox.ini +++ b/tox.ini @@ -19,10 +19,12 @@ setenv = docker: PALACE_TEST_MINIO_URL_SCHEME=http docker: PALACE_TEST_MINIO_USER=palace docker: PALACE_TEST_MINIO_PASSWORD=12345678901234567890 + docker: PALACE_TEST_REDIS_URL_SCHEME=redis docker = docker: os-circ docker: db-circ docker: minio-circ + docker: redis-circ allowlist_externals = python poetry @@ -71,6 +73,13 @@ expose = host_var = PALACE_TEST_MINIO_URL_HOST +[docker:redis-circ] +image = redis:7 +expose = + PALACE_TEST_REDIS_URL_PORT=6379/tcp +host_var = + PALACE_TEST_REDIS_URL_HOST + [gh-actions] python = 3.10: py310