Skip to content

Commit

Permalink
feat: rework client to only allow a single provider
Browse files Browse the repository at this point in the history
  • Loading branch information
jedel1043 committed Dec 20, 2024
1 parent c24f411 commit 9d36b7f
Show file tree
Hide file tree
Showing 9 changed files with 216 additions and 267 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,10 @@ on:
branches:
- main

# Cancel any in-flight run of this workflow for the same pull request
# (or ref), so only the most recent push gets tested.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
inclusive-naming-check:
name: Inclusive naming check
Expand Down
45 changes: 22 additions & 23 deletions charmcraft.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,6 @@ parts:
build-snaps:
- astral-uv
charm-requirements: ["requirements.txt"]
charm-binary-python-packages:
- rpds_py ~= 0.22.3
override-build: |
just requirements
craftctl default
Expand All @@ -42,32 +40,33 @@ subordinate: true
requires:
filesystem:
interface: filesystem_info
limit: 1
juju-info:
interface: juju-info
scope: container

config:
options:
mounts:
default: "{}"
mountpoint:
description: Location to mount the filesystem on the machine.
type: string
noexec:
default: false
description: |
Information to mount filesystems on the machine. This is specified as a JSON object string.
Example usage:
```bash
$ juju config filesystem-client \
mounts=<<EOF
{
"cephfs": {
"mountpoint": "/scratch",
"noexec": true
},
"nfs": {
"mountpoint": "/data",
"nosuid": true,
"nodev": true,
"read-only": true,
}
}
EOF
```
Block execution of binaries on the filesystem.
type: boolean
nosuid:
default: false
description: |
Do not honor suid and sgid bits on the filesystem.
type: boolean
nodev:
default: false
description: |
Block interpretation of character and/or block
devices on the filesystem.
type: boolean
read-only:
default: false
description: Mount filesystem as read-only.
type: boolean
6 changes: 2 additions & 4 deletions lib/charms/filesystem_client/v0/filesystem_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -582,16 +582,14 @@ class FilesystemRequires(_BaseInterface):
def __init__(self, charm: CharmBase, relation_name: str) -> None:
super().__init__(charm, relation_name)
self.framework.observe(charm.on[relation_name].relation_changed, self._on_relation_changed)
self.framework.observe(
charm.on[relation_name].relation_departed, self._on_relation_departed
)
self.framework.observe(charm.on[relation_name].relation_broken, self._on_relation_broken)

def _on_relation_changed(self, event: RelationChangedEvent) -> None:
    """Handle when the databag between client and server has been updated.

    Re-emits the update as a ``MountFilesystem`` event so charm code can
    observe a single interface-specific event instead of the raw hook.
    """
    _logger.debug("emitting `MountFilesystem` event from `RelationChanged` hook")
    self.on.mount_filesystem.emit(event.relation, app=event.app, unit=event.unit)

def _on_relation_departed(self, event: RelationDepartedEvent) -> None:
def _on_relation_broken(self, event: RelationDepartedEvent) -> None:
    """Handle when the server has broken the integration.

    Re-emits the hook as an ``UmountFilesystem`` event so the charm can
    unmount the filesystem exported by the removed server.
    """
    # NOTE(review): this method observes `relation_broken`, so the parameter
    # is actually a `RelationBrokenEvent`. The annotation is left as-is only
    # because `RelationBrokenEvent` may not be imported in this module —
    # confirm the import and tighten the annotation.
    _logger.debug("emitting `UmountFilesystem` event from `RelationBroken` hook")
    self.on.umount_filesystem.emit(event.relation, app=event.app, unit=event.unit)
Expand Down
4 changes: 1 addition & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,7 @@ name = "filesystem-client"
version = "0.0"
requires-python = "==3.12.*"
dependencies = [
"ops ~= 2.8",
"jsonschema ~= 4.23.0",
"rpds_py ~= 0.22.3"
"ops ~= 2.8"
]

[project.optional-dependencies]
Expand Down
121 changes: 57 additions & 64 deletions src/charm.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,42 +4,40 @@

"""Charm for the filesystem client."""

import json
import logging
from collections import Counter
from dataclasses import dataclass

import ops
from charms.filesystem_client.v0.filesystem_info import FilesystemRequires
from jsonschema import ValidationError, validate

from utils.manager import MountsManager

logger = logging.getLogger(__name__)

CONFIG_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"additionalProperties": {
"type": "object",
"required": ["mountpoint"],
"properties": {
"mountpoint": {"type": "string"},
"noexec": {"type": "boolean"},
"nosuid": {"type": "boolean"},
"nodev": {"type": "boolean"},
"read-only": {"type": "boolean"},
},
},
}
_logger = logging.getLogger(__name__)


class StopCharmError(Exception):
"""Exception raised when a method needs to finish the execution of the charm code."""

def __init__(self, status: ops.StatusBase):
def __init__(self, status: ops.StatusBase) -> None:
self.status = status


@dataclass(frozen=True)
class CharmConfig:
    """Configuration for the charm.

    Immutable snapshot of the charm's validated config options; one field
    per option declared in ``charmcraft.yaml``.
    """

    mountpoint: str
    """Location to mount the filesystem on the machine."""
    noexec: bool
    """Block execution of binaries on the filesystem."""
    nosuid: bool
    """Do not honour suid and sgid bits on the filesystem."""
    nodev: bool
    """Block interpretation of character and/or block devices on the filesystem."""
    read_only: bool
    """Mount filesystem as read-only."""


# Trying to use a delta charm (one method per event) proved to be a bit unwieldy, since
# we would have to handle multiple updates at once:
# - mount requests
Expand All @@ -51,11 +49,11 @@ def __init__(self, status: ops.StatusBase):
# mount requests.
#
# A holistic charm (one method for all events) was a lot easier to deal with,
# simplifying the code to handle all the multiple relations.
# simplifying the code to handle all the events.
class FilesystemClientCharm(ops.CharmBase):
"""Charm the application."""

def __init__(self, framework: ops.Framework):
def __init__(self, framework: ops.Framework) -> None:
super().__init__(framework)

self._filesystems = FilesystemRequires(self, "filesystem")
Expand All @@ -66,71 +64,66 @@ def __init__(self, framework: ops.Framework):
framework.observe(self._filesystems.on.mount_filesystem, self._handle_event)
framework.observe(self._filesystems.on.umount_filesystem, self._handle_event)

def _handle_event(self, event: ops.EventBase) -> None: # noqa: C901
def _handle_event(self, event: ops.EventBase) -> None:
"""Handle a Juju event."""
try:
self.unit.status = ops.MaintenanceStatus("Updating status.")

# CephFS is not supported on LXD containers.
if not self._mounts_manager.supported():
self.unit.status = ops.BlockedStatus("Cannot mount filesystems on LXD containers.")
return

self._ensure_installed()
config = self._get_config()
self._mount_filesystems(config)
except StopCharmError as e:
# This was the cleanest way to ensure the inner methods can still return prematurely
# when an error occurs.
self.app.status = e.status
self.unit.status = e.status
return

self.unit.status = ops.ActiveStatus("Mounted filesystems.")
self.unit.status = ops.ActiveStatus(f"Mounted filesystem at `{config.mountpoint}`.")

def _ensure_installed(self):
def _ensure_installed(self) -> None:
"""Ensure the required packages are installed into the unit."""
if not self._mounts_manager.installed:
self.unit.status = ops.MaintenanceStatus("Installing required packages.")
self._mounts_manager.install()

def _get_config(self) -> dict[str, dict[str, str | bool]]:
def _get_config(self) -> CharmConfig:
"""Get and validate the configuration of the charm."""
try:
config = json.loads(str(self.config.get("mounts", "")))
validate(config, CONFIG_SCHEMA)
config: dict[str, dict[str, str | bool]] = config
for fs, opts in config.items():
for opt in ["noexec", "nosuid", "nodev", "read-only"]:
opts[opt] = opts.get(opt, False)
return config
except (json.JSONDecodeError, ValidationError) as e:
if not (mountpoint := self.config.get("mountpoint")):
raise StopCharmError(ops.BlockedStatus("Missing `mountpoint` in config."))

return CharmConfig(
mountpoint=str(mountpoint),
noexec=bool(self.config.get("noexec")),
nosuid=bool(self.config.get("nosuid")),
nodev=bool(self.config.get("nodev")),
read_only=bool(self.config.get("read-only")),
)

def _mount_filesystems(self, config: CharmConfig) -> None:
"""Mount the filesystem for the charm."""
endpoints = self._filesystems.endpoints
if not endpoints:
raise StopCharmError(
ops.BlockedStatus(f"invalid configuration for option `mounts`. reason:\n{e}")
ops.BlockedStatus("Waiting for an integration with a filesystem provider.")
)

def _mount_filesystems(self, config: dict[str, dict[str, str | bool]]):
"""Mount all available filesystems for the charm."""
endpoints = self._filesystems.endpoints
for fs_type, count in Counter(
[endpoint.info.filesystem_type() for endpoint in endpoints]
).items():
if count > 1:
raise StopCharmError(
ops.BlockedStatus(f"Too many relations for mount type `{fs_type}`.")
)
# This is limited to 1 relation.
endpoint = endpoints[0]

self.unit.status = ops.MaintenanceStatus("Ensuring filesystems are mounted.")
self.unit.status = ops.MaintenanceStatus("Mounting filesystem.")

with self._mounts_manager.mounts() as mounts:
for endpoint in endpoints:
fs_type = endpoint.info.filesystem_type()
if not (options := config.get(fs_type)):
raise StopCharmError(
ops.BlockedStatus(f"Missing configuration for mount type `{fs_type}`.")
)

mountpoint = str(options["mountpoint"])

opts = []
opts.append("noexec" if options.get("noexec") else "exec")
opts.append("nosuid" if options.get("nosuid") else "suid")
opts.append("nodev" if options.get("nodev") else "dev")
opts.append("ro" if options.get("read-only") else "rw")
mounts.add(info=endpoint.info, mountpoint=mountpoint, options=opts)
opts = []
opts.append("noexec" if config.noexec else "exec")
opts.append("nosuid" if config.nosuid else "suid")
opts.append("nodev" if config.nodev else "dev")
opts.append("ro" if config.read_only else "rw")
mounts.add(info=endpoint.info, mountpoint=config.mountpoint, options=opts)


if __name__ == "__main__": # pragma: nocover
Expand Down
Loading

0 comments on commit 9d36b7f

Please sign in to comment.