From a0519cb4d8c5eba979902677de1d2fdee1293f0d Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Thu, 15 Aug 2024 08:56:58 +0000 Subject: [PATCH 01/19] support external connections --- poetry.lock | 411 ++++++++++++++++++++++++++--------------------- pyproject.toml | 1 + src/charm.py | 60 ++++--- src/node_port.py | 146 +++++++++++++++++ 4 files changed, 399 insertions(+), 219 deletions(-) create mode 100644 src/node_port.py diff --git a/poetry.lock b/poetry.lock index 8d45d017..d81fb76a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -92,22 +92,22 @@ test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] [[package]] name = "attrs" -version = "23.2.0" +version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", 
"pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "bcrypt" @@ -173,63 +173,78 @@ files = [ [[package]] name = "cffi" -version = "1.16.0" +version = "1.17.0" description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" files = [ - {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, - {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, - {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, - {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, - {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = 
"sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, - {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, - {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, - {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, - {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, - {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, - {file = 
"cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, - {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, - {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, - {file = 
"cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, - {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, - {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, - {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, + {file = "cffi-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb"}, + {file = "cffi-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a"}, + {file = "cffi-1.17.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42"}, + {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d"}, + {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2"}, + {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab"}, + {file = 
"cffi-1.17.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b"}, + {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206"}, + {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa"}, + {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f"}, + {file = "cffi-1.17.0-cp310-cp310-win32.whl", hash = "sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc"}, + {file = "cffi-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2"}, + {file = "cffi-1.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720"}, + {file = "cffi-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8"}, + {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6"}, + {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91"}, + {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8"}, + {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb"}, + {file = "cffi-1.17.0-cp311-cp311-win32.whl", hash = "sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9"}, + {file = "cffi-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0"}, + {file = "cffi-1.17.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc"}, + {file = "cffi-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828"}, + {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150"}, + {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash 
= "sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a"}, + {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885"}, + {file = "cffi-1.17.0-cp312-cp312-win32.whl", hash = "sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492"}, + {file = "cffi-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2"}, + {file = "cffi-1.17.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118"}, + {file = "cffi-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204"}, + {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f"}, + {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0"}, + {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4"}, + {file = "cffi-1.17.0-cp313-cp313-win32.whl", hash = 
"sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a"}, + {file = "cffi-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7"}, + {file = "cffi-1.17.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401"}, + {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c"}, + {file = "cffi-1.17.0-cp38-cp38-win32.whl", hash = "sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499"}, + {file = "cffi-1.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c"}, + {file = "cffi-1.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2"}, + {file = "cffi-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759"}, + {file = "cffi-1.17.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4"}, + {file = 
"cffi-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82"}, + {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf"}, + {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058"}, + {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932"}, + {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693"}, + {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3"}, + {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4"}, + {file = "cffi-1.17.0-cp39-cp39-win32.whl", hash = "sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb"}, + {file = "cffi-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29"}, + {file = "cffi-1.17.0.tar.gz", hash = "sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76"}, ] [package.dependencies] @@ -381,63 +396,83 @@ typing-extensions = "*" [[package]] name = "coverage" -version = "7.6.0" +version = "7.6.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dff044f661f59dace805eedb4a7404c573b6ff0cdba4a524141bc63d7be5c7fd"}, - {file = "coverage-7.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8659fd33ee9e6ca03950cfdcdf271d645cf681609153f218826dd9805ab585c"}, - {file 
= "coverage-7.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7792f0ab20df8071d669d929c75c97fecfa6bcab82c10ee4adb91c7a54055463"}, - {file = "coverage-7.6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4b3cd1ca7cd73d229487fa5caca9e4bc1f0bca96526b922d61053ea751fe791"}, - {file = "coverage-7.6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7e128f85c0b419907d1f38e616c4f1e9f1d1b37a7949f44df9a73d5da5cd53c"}, - {file = "coverage-7.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a94925102c89247530ae1dab7dc02c690942566f22e189cbd53579b0693c0783"}, - {file = "coverage-7.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dcd070b5b585b50e6617e8972f3fbbee786afca71b1936ac06257f7e178f00f6"}, - {file = "coverage-7.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d50a252b23b9b4dfeefc1f663c568a221092cbaded20a05a11665d0dbec9b8fb"}, - {file = "coverage-7.6.0-cp310-cp310-win32.whl", hash = "sha256:0e7b27d04131c46e6894f23a4ae186a6a2207209a05df5b6ad4caee6d54a222c"}, - {file = "coverage-7.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:54dece71673b3187c86226c3ca793c5f891f9fc3d8aa183f2e3653da18566169"}, - {file = "coverage-7.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7b525ab52ce18c57ae232ba6f7010297a87ced82a2383b1afd238849c1ff933"}, - {file = "coverage-7.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bea27c4269234e06f621f3fac3925f56ff34bc14521484b8f66a580aacc2e7d"}, - {file = "coverage-7.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed8d1d1821ba5fc88d4a4f45387b65de52382fa3ef1f0115a4f7a20cdfab0e94"}, - {file = "coverage-7.6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01c322ef2bbe15057bc4bf132b525b7e3f7206f071799eb8aa6ad1940bcf5fb1"}, - {file = 
"coverage-7.6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03cafe82c1b32b770a29fd6de923625ccac3185a54a5e66606da26d105f37dac"}, - {file = "coverage-7.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0d1b923fc4a40c5832be4f35a5dab0e5ff89cddf83bb4174499e02ea089daf57"}, - {file = "coverage-7.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4b03741e70fb811d1a9a1d75355cf391f274ed85847f4b78e35459899f57af4d"}, - {file = "coverage-7.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a73d18625f6a8a1cbb11eadc1d03929f9510f4131879288e3f7922097a429f63"}, - {file = "coverage-7.6.0-cp311-cp311-win32.whl", hash = "sha256:65fa405b837060db569a61ec368b74688f429b32fa47a8929a7a2f9b47183713"}, - {file = "coverage-7.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:6379688fb4cfa921ae349c76eb1a9ab26b65f32b03d46bb0eed841fd4cb6afb1"}, - {file = "coverage-7.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f7db0b6ae1f96ae41afe626095149ecd1b212b424626175a6633c2999eaad45b"}, - {file = "coverage-7.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bbdf9a72403110a3bdae77948b8011f644571311c2fb35ee15f0f10a8fc082e8"}, - {file = "coverage-7.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cc44bf0315268e253bf563f3560e6c004efe38f76db03a1558274a6e04bf5d5"}, - {file = "coverage-7.6.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da8549d17489cd52f85a9829d0e1d91059359b3c54a26f28bec2c5d369524807"}, - {file = "coverage-7.6.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0086cd4fc71b7d485ac93ca4239c8f75732c2ae3ba83f6be1c9be59d9e2c6382"}, - {file = "coverage-7.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1fad32ee9b27350687035cb5fdf9145bc9cf0a094a9577d43e909948ebcfa27b"}, - {file = "coverage-7.6.0-cp312-cp312-musllinux_1_2_i686.whl", 
hash = "sha256:044a0985a4f25b335882b0966625270a8d9db3d3409ddc49a4eb00b0ef5e8cee"}, - {file = "coverage-7.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:76d5f82213aa78098b9b964ea89de4617e70e0d43e97900c2778a50856dac605"}, - {file = "coverage-7.6.0-cp312-cp312-win32.whl", hash = "sha256:3c59105f8d58ce500f348c5b56163a4113a440dad6daa2294b5052a10db866da"}, - {file = "coverage-7.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:ca5d79cfdae420a1d52bf177de4bc2289c321d6c961ae321503b2ca59c17ae67"}, - {file = "coverage-7.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d39bd10f0ae453554798b125d2f39884290c480f56e8a02ba7a6ed552005243b"}, - {file = "coverage-7.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:beb08e8508e53a568811016e59f3234d29c2583f6b6e28572f0954a6b4f7e03d"}, - {file = "coverage-7.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2e16f4cd2bc4d88ba30ca2d3bbf2f21f00f382cf4e1ce3b1ddc96c634bc48ca"}, - {file = "coverage-7.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6616d1c9bf1e3faea78711ee42a8b972367d82ceae233ec0ac61cc7fec09fa6b"}, - {file = "coverage-7.6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad4567d6c334c46046d1c4c20024de2a1c3abc626817ae21ae3da600f5779b44"}, - {file = "coverage-7.6.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d17c6a415d68cfe1091d3296ba5749d3d8696e42c37fca5d4860c5bf7b729f03"}, - {file = "coverage-7.6.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9146579352d7b5f6412735d0f203bbd8d00113a680b66565e205bc605ef81bc6"}, - {file = "coverage-7.6.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cdab02a0a941af190df8782aafc591ef3ad08824f97850b015c8c6a8b3877b0b"}, - {file = "coverage-7.6.0-cp38-cp38-win32.whl", hash = "sha256:df423f351b162a702c053d5dddc0fc0ef9a9e27ea3f449781ace5f906b664428"}, - {file = "coverage-7.6.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:f2501d60d7497fd55e391f423f965bbe9e650e9ffc3c627d5f0ac516026000b8"}, - {file = "coverage-7.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7221f9ac9dad9492cecab6f676b3eaf9185141539d5c9689d13fd6b0d7de840c"}, - {file = "coverage-7.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ddaaa91bfc4477d2871442bbf30a125e8fe6b05da8a0015507bfbf4718228ab2"}, - {file = "coverage-7.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4cbe651f3904e28f3a55d6f371203049034b4ddbce65a54527a3f189ca3b390"}, - {file = "coverage-7.6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831b476d79408ab6ccfadaaf199906c833f02fdb32c9ab907b1d4aa0713cfa3b"}, - {file = "coverage-7.6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46c3d091059ad0b9c59d1034de74a7f36dcfa7f6d3bde782c49deb42438f2450"}, - {file = "coverage-7.6.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4d5fae0a22dc86259dee66f2cc6c1d3e490c4a1214d7daa2a93d07491c5c04b6"}, - {file = "coverage-7.6.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:07ed352205574aad067482e53dd606926afebcb5590653121063fbf4e2175166"}, - {file = "coverage-7.6.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:49c76cdfa13015c4560702574bad67f0e15ca5a2872c6a125f6327ead2b731dd"}, - {file = "coverage-7.6.0-cp39-cp39-win32.whl", hash = "sha256:482855914928c8175735a2a59c8dc5806cf7d8f032e4820d52e845d1f731dca2"}, - {file = "coverage-7.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:543ef9179bc55edfd895154a51792b01c017c87af0ebaae092720152e19e42ca"}, - {file = "coverage-7.6.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:6fe885135c8a479d3e37a7aae61cbd3a0fb2deccb4dda3c25f92a49189f766d6"}, - {file = "coverage-7.6.0.tar.gz", hash = "sha256:289cc803fa1dc901f84701ac10c9ee873619320f2f9aff38794db4a4a0268d51"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, + {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, + {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, + {file = 
"coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, + {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, + {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, + {file = 
"coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, + {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, + {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, + {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, + {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, + {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, + {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, + {file = 
"coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, ] [package.dependencies] @@ -575,13 +610,13 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth [[package]] name = "google-auth" -version = "2.32.0" +version = "2.33.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, - {file = "google_auth-2.32.0.tar.gz", hash = 
"sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, + {file = "google_auth-2.33.0-py2.py3-none-any.whl", hash = "sha256:8eff47d0d4a34ab6265c50a106a3362de6a9975bb08998700e389f857e4d39df"}, + {file = "google_auth-2.33.0.tar.gz", hash = "sha256:d6a52342160d7290e334b4d47ba390767e4438ad0d45b7630774533e82655b95"}, ] [package.dependencies] @@ -1066,13 +1101,13 @@ dev = ["jinja2"] [[package]] name = "paramiko" -version = "3.4.0" +version = "3.4.1" description = "SSH2 protocol library" optional = false python-versions = ">=3.6" files = [ - {file = "paramiko-3.4.0-py3-none-any.whl", hash = "sha256:43f0b51115a896f9c00f59618023484cb3a14b98bbceab43394a39c6739b7ee7"}, - {file = "paramiko-3.4.0.tar.gz", hash = "sha256:aac08f26a31dc4dffd92821527d1682d99d52f9ef6851968114a8728f3c274d3"}, + {file = "paramiko-3.4.1-py3-none-any.whl", hash = "sha256:8e49fd2f82f84acf7ffd57c64311aa2b30e575370dc23bdb375b10262f7eac32"}, + {file = "paramiko-3.4.1.tar.gz", hash = "sha256:8b15302870af7f6652f2e038975c1d2973f06046cb5d7d65355668b3ecbece0c"}, ] [package.dependencies] @@ -1145,22 +1180,22 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "5.27.2" +version = "5.27.3" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-5.27.2-cp310-abi3-win32.whl", hash = "sha256:354d84fac2b0d76062e9b3221f4abbbacdfd2a4d8af36bab0474f3a0bb30ab38"}, - {file = "protobuf-5.27.2-cp310-abi3-win_amd64.whl", hash = "sha256:0e341109c609749d501986b835f667c6e1e24531096cff9d34ae411595e26505"}, - {file = "protobuf-5.27.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a109916aaac42bff84702fb5187f3edadbc7c97fc2c99c5ff81dd15dcce0d1e5"}, - {file = "protobuf-5.27.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:176c12b1f1c880bf7a76d9f7c75822b6a2bc3db2d28baa4d300e8ce4cde7409b"}, - {file = "protobuf-5.27.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:b848dbe1d57ed7c191dfc4ea64b8b004a3f9ece4bf4d0d80a367b76df20bf36e"}, - {file = 
"protobuf-5.27.2-cp38-cp38-win32.whl", hash = "sha256:4fadd8d83e1992eed0248bc50a4a6361dc31bcccc84388c54c86e530b7f58863"}, - {file = "protobuf-5.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:610e700f02469c4a997e58e328cac6f305f649826853813177e6290416e846c6"}, - {file = "protobuf-5.27.2-cp39-cp39-win32.whl", hash = "sha256:9e8f199bf7f97bd7ecebffcae45ebf9527603549b2b562df0fbc6d4d688f14ca"}, - {file = "protobuf-5.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:7fc3add9e6003e026da5fc9e59b131b8f22b428b991ccd53e2af8071687b4fce"}, - {file = "protobuf-5.27.2-py3-none-any.whl", hash = "sha256:54330f07e4949d09614707c48b06d1a22f8ffb5763c159efd5c0928326a91470"}, - {file = "protobuf-5.27.2.tar.gz", hash = "sha256:f3ecdef226b9af856075f28227ff2c90ce3a594d092c39bee5513573f25e2714"}, + {file = "protobuf-5.27.3-cp310-abi3-win32.whl", hash = "sha256:dcb307cd4ef8fec0cf52cb9105a03d06fbb5275ce6d84a6ae33bc6cf84e0a07b"}, + {file = "protobuf-5.27.3-cp310-abi3-win_amd64.whl", hash = "sha256:16ddf3f8c6c41e1e803da7abea17b1793a97ef079a912e42351eabb19b2cffe7"}, + {file = "protobuf-5.27.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:68248c60d53f6168f565a8c76dc58ba4fa2ade31c2d1ebdae6d80f969cdc2d4f"}, + {file = "protobuf-5.27.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:b8a994fb3d1c11156e7d1e427186662b64694a62b55936b2b9348f0a7c6625ce"}, + {file = "protobuf-5.27.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:a55c48f2a2092d8e213bd143474df33a6ae751b781dd1d1f4d953c128a415b25"}, + {file = "protobuf-5.27.3-cp38-cp38-win32.whl", hash = "sha256:043853dcb55cc262bf2e116215ad43fa0859caab79bb0b2d31b708f128ece035"}, + {file = "protobuf-5.27.3-cp38-cp38-win_amd64.whl", hash = "sha256:c2a105c24f08b1e53d6c7ffe69cb09d0031512f0b72f812dd4005b8112dbe91e"}, + {file = "protobuf-5.27.3-cp39-cp39-win32.whl", hash = "sha256:c84eee2c71ed83704f1afbf1a85c3171eab0fd1ade3b399b3fad0884cbcca8bf"}, + {file = "protobuf-5.27.3-cp39-cp39-win_amd64.whl", hash = 
"sha256:af7c0b7cfbbb649ad26132e53faa348580f844d9ca46fd3ec7ca48a1ea5db8a1"}, + {file = "protobuf-5.27.3-py3-none-any.whl", hash = "sha256:8572c6533e544ebf6899c360e91d6bcbbee2549251643d32c52cf8a5de295ba5"}, + {file = "protobuf-5.27.3.tar.gz", hash = "sha256:82460903e640f2b7e34ee81a947fdaad89de796d324bcbc38ff5430bcdead82c"}, ] [[package]] @@ -1581,62 +1616,64 @@ files = [ [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = 
"sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file 
= "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = 
"sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] @@ -2102,4 +2139,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "57ac238e0511cea506f2693ecdb4b163dd4cb7a0d88435f1ad44f977cb50ba89" +content-hash = "c7cb79450373864dd99bf571c2f6489124755b12d1b8f04cd9a8241326fa7d49" diff --git a/pyproject.toml b/pyproject.toml index b8f05243..ceffc1af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,6 +6,7 @@ package-mode = false [tool.poetry.dependencies] +lightkube = "^0.15.3" python = "^3.10" ops = "^2.15.0" pymongo = "^4.7.3" diff --git a/src/charm.py b/src/charm.py index 6e9b35dd..72a035a5 100755 --- a/src/charm.py +++ b/src/charm.py @@ -4,17 +4,11 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
import json - from exceptions import MissingSecretError - from ops.pebble import PathError, ProtocolError - - from typing import Set, Optional, Dict - - +from node_port import NodePortManager from charms.mongos.v0.set_status import MongosStatusHandler - from charms.mongodb.v0.mongodb_secrets import SecretCache from charms.mongodb.v0.mongodb_secrets import generate_secret_label from charms.mongodb.v1.mongos import MongosConfiguration, MongosConnection @@ -58,9 +52,7 @@ class MongosCharm(ops.CharmBase): def __init__(self, *args): super().__init__(*args) - self.framework.observe( - self.on.mongos_pebble_ready, self._on_mongos_pebble_ready - ) + self.framework.observe(self.on.mongos_pebble_ready, self._on_mongos_pebble_ready) self.framework.observe(self.on.start, self._on_start) self.framework.observe(self.on.update_status, self._on_update_status) @@ -68,9 +60,16 @@ def __init__(self, *args): self.secrets = SecretCache(self) self.status = MongosStatusHandler(self) + self.node_port_manager = NodePortManager( + self, pod_name=self.unit.name.replace("/", "-"), namespace=self.model.name + ) + # BEGIN: hook functions def _on_mongos_pebble_ready(self, event) -> None: """Configure MongoDB pebble layer specification.""" + # any external services must be created before setting of properties + self.update_external_services() + if not self.is_integrated_to_config_server(): logger.info( "mongos service not starting. Cannot start until application is integrated to a config-server." @@ -106,9 +105,7 @@ def _on_start(self, event: StartEvent) -> None: # start hooks are fired before relation hooks and `mongos` requires a config-server in # order to start. 
Wait to receive config-server info from the relation event before # starting `mongos` daemon - self.status.set_and_share_status( - BlockedStatus("Missing relation to config-server.") - ) + self.status.set_and_share_status(BlockedStatus("Missing relation to config-server.")) def _on_update_status(self, _): """Handle the update status event""" @@ -119,9 +116,7 @@ def _on_update_status(self, _): logger.info( "Missing integration to config-server. mongos cannot run unless connected to config-server." ) - self.status.set_and_share_status( - BlockedStatus("Missing relation to config-server.") - ) + self.status.set_and_share_status(BlockedStatus("Missing relation to config-server.")) return self.status.set_and_share_status(ActiveStatus()) @@ -129,9 +124,20 @@ def _on_update_status(self, _): # END: hook functions # BEGIN: helper functions + def update_external_services(self) -> None: + """Attempts to update any external Kubernetes services.""" + if not self.is_external_client: + return + + # every unit attempts to create a bootstrap service + # if exists, will silently continue + self.node_port_manager.apply_service( + service=self.node_port_manager.build_node_port_services(port=Config.MONGOS_PORT) + ) + def is_integrated_to_config_server(self) -> bool: """Returns True if the mongos application is integrated to a config-server.""" - return self.model.relations[Config.Relations.CLUSTER_RELATIONS_NAME] is not None + return len(self.model.relations[Config.Relations.CLUSTER_RELATIONS_NAME]) def _get_mongos_config_for_user( self, user: MongoDBUser, hosts: Set[str] @@ -187,9 +193,7 @@ def remove_secret(self, scope, key) -> None: content = secret.get_content() if not content.get(key) or content[key] == Config.Secrets.SECRET_DELETED_LABEL: - logger.error( - f"Non-existing secret {scope}:{key} was attempted to be removed." 
- ) + logger.error(f"Non-existing secret {scope}:{key} was attempted to be removed.") return content[key] = Config.Secrets.SECRET_DELETED_LABEL @@ -214,9 +218,7 @@ def set_database(self, database: str) -> None: return # a mongos shard can only be related to one config server - config_server_rel = self.model.relations[ - Config.Relations.CLUSTER_RELATIONS_NAME - ][0] + config_server_rel = self.model.relations[Config.Relations.CLUSTER_RELATIONS_NAME][0] self.cluster.database_requires.update_relation_data( config_server_rel.id, {DATABASE_TAG: database} ) @@ -321,9 +323,7 @@ def _pull_licenses(container: Container) -> None: for license_name in licenses: try: - license_file = container.pull( - path=Config.get_license_path(license_name) - ) + license_file = container.pull(path=Config.get_license_path(license_name)) f = open(f"LICENSE_{license_name}", "x") f.write(str(license_file.read())) f.close() @@ -340,14 +340,10 @@ def _set_data_dir_permissions(container: Container) -> None: for path in [Config.DATA_DIR, Config.LOG_DIR, Config.LogRotate.LOG_STATUS_DIR]: paths = container.list_files(path, itself=True) if not len(paths) == 1: - raise ExtraDataDirError( - "list_files doesn't return only the directory itself" - ) + raise ExtraDataDirError("list_files doesn't return only the directory itself") logger.debug(f"Data directory ownership: {paths[0].user}:{paths[0].group}") if paths[0].user != Config.UNIX_USER or paths[0].group != Config.UNIX_GROUP: - container.exec( - f"chown {Config.UNIX_USER}:{Config.UNIX_GROUP} -R {path}".split() - ) + container.exec(f"chown {Config.UNIX_USER}:{Config.UNIX_GROUP} -R {path}".split()) def push_file_to_unit( self, diff --git a/src/node_port.py b/src/node_port.py new file mode 100644 index 00000000..b087cfa9 --- /dev/null +++ b/src/node_port.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Manager for handling mongos Kubernetes resources for a single mongos pod.""" + +import logging +from functools import cached_property +from typing import Literal, NamedTuple +from ops.charm import CharmBase +from lightkube.models.meta_v1 import ObjectMeta +from lightkube.core.client import Client +from lightkube.core.exceptions import ApiError +from lightkube.resources.core_v1 import Pod, Service +from lightkube.models.core_v1 import ServicePort, ServiceSpec + + +logger = logging.getLogger(__name__) + +# default logging from lightkube httpx requests is very noisy +logging.getLogger("lightkube").disabled = True +logging.getLogger("lightkube.core.client").disabled = True +logging.getLogger("httpx").disabled = True +logging.getLogger("httpcore").disabled = True + +AuthMechanism = Literal["SCRAM-SHA-512", "OAUTHBEARER", "SSL"] +AuthProtocol = Literal["SASL_PLAINTEXT", "SASL_SSL", "SSL"] +AuthMap = NamedTuple("AuthMap", protocol=AuthProtocol, mechanism=AuthMechanism) + + +class NodePortManager: + """Manager for handling mongos Kubernetes resources for a single mongos pod.""" + + def __init__( + self, + charm: CharmBase, + pod_name: str, + namespace: str, + ): + self.charm = charm + self.pod_name = pod_name + self.app_name = "-".join(pod_name.split("-")[:-1]) + self.namespace = namespace + self.nodeport_service_name = f"{self.app_name}-nodeport" + self.short_auth_mechanism_mapping: dict[AuthMechanism, str] = { + "SCRAM-SHA-512": "scram", + "OAUTHBEARER": "oauth", + "SSL": "ssl", + } + + @cached_property + def client(self) -> Client: + """The Lightkube client.""" + return Client( # pyright: ignore[reportArgumentType] + field_manager=self.pod_name, + namespace=self.namespace, + ) + + # --- GETTERS --- + + def get_pod(self, pod_name: str = "") -> Pod: + """Gets the Pod via the K8s API.""" + # Allows us to get pods from other peer units + pod_name = pod_name or self.pod_name + + return self.client.get( + res=Pod, + name=self.pod_name, + ) + + def get_service(self, 
service_name: str) -> Service | None: + """Gets the Service via the K8s API.""" + return self.client.get( + res=Service, + name=service_name, + ) + + def get_node_port( + self, + service: Service, + auth_map: AuthMap, + ) -> int: + """Gets the NodePort number for the service via the K8s API.""" + if not service.spec or not service.spec.ports: + raise Exception("Could not find Service spec or ports") + + for port in service.spec.ports: + if ( + auth_map.protocol.lower().replace("_", "-") in port.name + and self.short_auth_mechanism_mapping[auth_map.mechanism] in port.name + ): + return port.nodePort + + raise Exception( + f"Unable to find NodePort using {auth_map.protocol} and {auth_map.mechanism} for the {service} service" + ) + + def build_node_port_services(self, port: str) -> Service: + """Builds a ClusterIP service for initial client connection.""" + pod = self.get_pod(pod_name=self.pod_name) + if not pod.metadata: + raise Exception(f"Could not find metadata for {pod}") + + return Service( + metadata=ObjectMeta( + name=self.nodeport_service_name, + namespace=self.namespace, + # owned by the StatefulSet + ownerReferences=pod.metadata.ownerReferences, + ), + spec=ServiceSpec( + type="NodePort", + selector={"app.kubernetes.io/name": self.app_name}, + ports=[ + ServicePort( + protocol="TCP", + port=port, + targetPort=port, + name=f"{self.charm.app.name}-nodeport", + ) + ], + ), + ) + + def build_listener_service_name(self, auth_map: AuthMap): + """Builds the Service name for a given auth.protocol and auth.mechanism. 
+ + Returns: + String of listener service name + e.g `mongos-0-sasl-plaintext-scram`, `mongos-12-sasl-ssl-oauth` + """ + return f"{self.pod_name}-{auth_map.protocol.lower().replace('_','-')}-{self.short_auth_mechanism_mapping[auth_map.mechanism]}" + + def apply_service(self, service: Service) -> None: + """Applies a given Service.""" + try: + self.client.apply(service) + except ApiError as e: + if e.status.code == 403: + logger.error("Could not apply service, application needs `juju trust`") + return + if e.status.code == 422 and "port is already allocated" in e.status.message: + logger.error(e.status.message) + return + else: + raise From ea47260c19a4c70e202fd96e57987be73b9b762e Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Thu, 15 Aug 2024 09:01:20 +0000 Subject: [PATCH 02/19] add tests for nodeport --- tests/integration/helpers.py | 49 +++++++++++++++++++++++++++++---- tests/integration/test_charm.py | 24 ++++++++++++++-- 2 files changed, 65 insertions(+), 8 deletions(-) diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index f0f215dd..78330443 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -3,16 +3,17 @@ # See LICENSE file for licensing details. 
import json -from typing import Any, Dict, List, Optional +import subprocess import logging +from typing import Any, Dict, List, Optional from dateutil.parser import parse from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_delay, wait_fixed logger = logging.getLogger(__name__) - +PORT_MAPPING_INDEX = 4 MONGOS_APP_NAME = "mongos" @@ -138,10 +139,48 @@ async def wait_for_mongos_units_blocked( try: old_interval = (await ops_test.model.get_config())[hook_interval_key] await ops_test.model.set_config({hook_interval_key: "1m"}) - for attempt in Retrying( - stop=stop_after_delay(timeout), wait=wait_fixed(1), reraise=True - ): + for attempt in Retrying(stop=stop_after_delay(timeout), wait=wait_fixed(1), reraise=True): with attempt: await check_all_units_blocked_with_status(ops_test, db_app_name, status) finally: await ops_test.model.set_config({hook_interval_key: old_interval}) + + +def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> None: + node_port_cmd = ( + f"kubectl get svc -n {ops_test.model.name} | grep NodePort | grep {node_port_name}" + ) + result = subprocess.run(node_port_cmd, shell=True, capture_output=True, text=True) + if result.returncode: + logger.info("was not able to find nodeport") + assert False, f"Command: {node_port_cmd} to find node port failed." 
+ + assert ( + len(result.stdout.splitlines()) > 0 + ), "No port information available for expected service" + + # port information is available at PORT_MAPPING_INDEX + port_mapping = result.stdout.split()[PORT_MAPPING_INDEX] + + # port information is of the form 27018:30259/TCP + return port_mapping.split(":")[1].split("/")[0] + + +def assert_node_port_available(ops_test: OpsTest, node_port_name: str) -> None: + assert get_port_from_node_port( + ops_test, node_port_name + ), "No port information for expected service" + + +def get_public_k8s_ip() -> str: + result = subprocess.run("kubectl get nodes", shell=True, capture_output=True, text=True) + + if result.returncode: + logger.info("failed to retrieve public facing k8s IP") + assert False, "failed to retrieve public facing k8s IP" + + # port information is the first item of the last line + port_mapping = result.stdout.splitlines()[-1].split()[0] + + # port mapping is of the form ip-172-31-18-133 + return port_mapping.split("ip-")[1].replace("-", ".") diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 2af5c074..7a75e924 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -9,7 +9,10 @@ from pytest_operator.plugin import OpsTest from .helpers import ( + assert_node_port_available, wait_for_mongos_units_blocked, + get_public_k8s_ip, + get_port_from_node_port, MONGOS_APP_NAME, ) @@ -24,9 +27,7 @@ async def test_build_and_deploy(ops_test: OpsTest): Assert on the unit status before any relations/configurations take place. 
""" charm = await ops_test.build_charm(".") - resources = { - "mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"] - } + resources = {"mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"]} await ops_test.model.deploy( charm, resources=resources, @@ -47,3 +48,20 @@ async def test_waits_for_config_server(ops_test: OpsTest) -> None: status="Missing relation to config-server.", timeout=300, ) + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_mongos_external_connections(ops_test: OpsTest) -> None: + """Tests that mongos is accessible externally.""" + assert_node_port_available(ops_test, node_port_name="mongos-k8s-nodeport") + + # TODO add this in once DPE-5040 / PR #20 merges + # exposed_node_port = get_port_from_node_port(ops_test, node_port_name="mongos-k8s-nodeport") + # public_k8s_ip = get_public_k8s_ip() + # username, password = await get_mongos_user_password(ops_test, MONGOS_APP_NAME) + # external_mongos_client = MongoClient( + # f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}" + # ) + # external_mongos_client.admin.command("usersInfo") + # external_mongos_client.close() From 2c97677fb54741f20b1d1843e34a70ac4d487805 Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Thu, 15 Aug 2024 09:14:10 +0000 Subject: [PATCH 03/19] fmt + lint --- src/charm.py | 36 ++++++++++++++++++++++++--------- tests/integration/helpers.py | 12 ++++++----- tests/integration/test_charm.py | 6 +++--- 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/src/charm.py b/src/charm.py index 72a035a5..a89458e7 100755 --- a/src/charm.py +++ b/src/charm.py @@ -52,7 +52,9 @@ class MongosCharm(ops.CharmBase): def __init__(self, *args): super().__init__(*args) - self.framework.observe(self.on.mongos_pebble_ready, self._on_mongos_pebble_ready) + self.framework.observe( + self.on.mongos_pebble_ready, self._on_mongos_pebble_ready + ) self.framework.observe(self.on.start, self._on_start) 
self.framework.observe(self.on.update_status, self._on_update_status) @@ -105,7 +107,9 @@ def _on_start(self, event: StartEvent) -> None: # start hooks are fired before relation hooks and `mongos` requires a config-server in # order to start. Wait to receive config-server info from the relation event before # starting `mongos` daemon - self.status.set_and_share_status(BlockedStatus("Missing relation to config-server.")) + self.status.set_and_share_status( + BlockedStatus("Missing relation to config-server.") + ) def _on_update_status(self, _): """Handle the update status event""" @@ -116,7 +120,9 @@ def _on_update_status(self, _): logger.info( "Missing integration to config-server. mongos cannot run unless connected to config-server." ) - self.status.set_and_share_status(BlockedStatus("Missing relation to config-server.")) + self.status.set_and_share_status( + BlockedStatus("Missing relation to config-server.") + ) return self.status.set_and_share_status(ActiveStatus()) @@ -132,7 +138,9 @@ def update_external_services(self) -> None: # every unit attempts to create a bootstrap service # if exists, will silently continue self.node_port_manager.apply_service( - service=self.node_port_manager.build_node_port_services(port=Config.MONGOS_PORT) + service=self.node_port_manager.build_node_port_services( + port=Config.MONGOS_PORT + ) ) def is_integrated_to_config_server(self) -> bool: @@ -193,7 +201,9 @@ def remove_secret(self, scope, key) -> None: content = secret.get_content() if not content.get(key) or content[key] == Config.Secrets.SECRET_DELETED_LABEL: - logger.error(f"Non-existing secret {scope}:{key} was attempted to be removed.") + logger.error( + f"Non-existing secret {scope}:{key} was attempted to be removed." 
+ ) return content[key] = Config.Secrets.SECRET_DELETED_LABEL @@ -218,7 +228,9 @@ def set_database(self, database: str) -> None: return # a mongos shard can only be related to one config server - config_server_rel = self.model.relations[Config.Relations.CLUSTER_RELATIONS_NAME][0] + config_server_rel = self.model.relations[ + Config.Relations.CLUSTER_RELATIONS_NAME + ][0] self.cluster.database_requires.update_relation_data( config_server_rel.id, {DATABASE_TAG: database} ) @@ -323,7 +335,9 @@ def _pull_licenses(container: Container) -> None: for license_name in licenses: try: - license_file = container.pull(path=Config.get_license_path(license_name)) + license_file = container.pull( + path=Config.get_license_path(license_name) + ) f = open(f"LICENSE_{license_name}", "x") f.write(str(license_file.read())) f.close() @@ -340,10 +354,14 @@ def _set_data_dir_permissions(container: Container) -> None: for path in [Config.DATA_DIR, Config.LOG_DIR, Config.LogRotate.LOG_STATUS_DIR]: paths = container.list_files(path, itself=True) if not len(paths) == 1: - raise ExtraDataDirError("list_files doesn't return only the directory itself") + raise ExtraDataDirError( + "list_files doesn't return only the directory itself" + ) logger.debug(f"Data directory ownership: {paths[0].user}:{paths[0].group}") if paths[0].user != Config.UNIX_USER or paths[0].group != Config.UNIX_GROUP: - container.exec(f"chown {Config.UNIX_USER}:{Config.UNIX_GROUP} -R {path}".split()) + container.exec( + f"chown {Config.UNIX_USER}:{Config.UNIX_GROUP} -R {path}".split() + ) def push_file_to_unit( self, diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 78330443..2980e58a 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -139,7 +139,9 @@ async def wait_for_mongos_units_blocked( try: old_interval = (await ops_test.model.get_config())[hook_interval_key] await ops_test.model.set_config({hook_interval_key: "1m"}) - for attempt in 
Retrying(stop=stop_after_delay(timeout), wait=wait_fixed(1), reraise=True): + for attempt in Retrying( + stop=stop_after_delay(timeout), wait=wait_fixed(1), reraise=True + ): with attempt: await check_all_units_blocked_with_status(ops_test, db_app_name, status) finally: @@ -147,9 +149,7 @@ async def wait_for_mongos_units_blocked( def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> None: - node_port_cmd = ( - f"kubectl get svc -n {ops_test.model.name} | grep NodePort | grep {node_port_name}" - ) + node_port_cmd = f"kubectl get svc -n {ops_test.model.name} | grep NodePort | grep {node_port_name}" result = subprocess.run(node_port_cmd, shell=True, capture_output=True, text=True) if result.returncode: logger.info("was not able to find nodeport") @@ -173,7 +173,9 @@ def assert_node_port_available(ops_test: OpsTest, node_port_name: str) -> None: def get_public_k8s_ip() -> str: - result = subprocess.run("kubectl get nodes", shell=True, capture_output=True, text=True) + result = subprocess.run( + "kubectl get nodes", shell=True, capture_output=True, text=True + ) if result.returncode: logger.info("failed to retrieve public facing k8s IP") diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 7a75e924..9bb04f66 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -11,8 +11,6 @@ from .helpers import ( assert_node_port_available, wait_for_mongos_units_blocked, - get_public_k8s_ip, - get_port_from_node_port, MONGOS_APP_NAME, ) @@ -27,7 +25,9 @@ async def test_build_and_deploy(ops_test: OpsTest): Assert on the unit status before any relations/configurations take place. 
""" charm = await ops_test.build_charm(".") - resources = {"mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"]} + resources = { + "mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"] + } await ops_test.model.deploy( charm, resources=resources, From ba8179682c38a01e893098a71515d83f54fae983 Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Thu, 15 Aug 2024 14:51:52 +0000 Subject: [PATCH 04/19] PR feedback + adjustements for tests --- src/charm.py | 12 +++----- src/node_port.py | 53 +++------------------------------ tests/integration/test_charm.py | 28 ++++++++++------- 3 files changed, 26 insertions(+), 67 deletions(-) diff --git a/src/charm.py b/src/charm.py index a89458e7..98baee9f 100755 --- a/src/charm.py +++ b/src/charm.py @@ -135,7 +135,7 @@ def update_external_services(self) -> None: if not self.is_external_client: return - # every unit attempts to create a bootstrap service + # every unit attempts to create a nodeport service # if exists, will silently continue self.node_port_manager.apply_service( service=self.node_port_manager.build_node_port_services( @@ -418,14 +418,10 @@ def _unit_ip(self) -> str: def is_external_client(self) -> Optional[str]: """Returns the connectivity mode which mongos should use. - This is determined by checking the modes requested by the client(s). - - TODO: Future PR. This should be modified to work for many clients. + Note that for K8s routers this should always default to True. However we still include + this function so that we can have parity on properties with the K8s and VM routers. 
""" - if EXTERNAL_CONNECTIVITY_TAG not in self.app_peer_data: - return False - - return json.loads(self.app_peer_data.get(EXTERNAL_CONNECTIVITY_TAG)) + return True @property def database(self) -> Optional[str]: diff --git a/src/node_port.py b/src/node_port.py index b087cfa9..56b47ab9 100644 --- a/src/node_port.py +++ b/src/node_port.py @@ -6,7 +6,6 @@ import logging from functools import cached_property -from typing import Literal, NamedTuple from ops.charm import CharmBase from lightkube.models.meta_v1 import ObjectMeta from lightkube.core.client import Client @@ -23,10 +22,6 @@ logging.getLogger("httpx").disabled = True logging.getLogger("httpcore").disabled = True -AuthMechanism = Literal["SCRAM-SHA-512", "OAUTHBEARER", "SSL"] -AuthProtocol = Literal["SASL_PLAINTEXT", "SASL_SSL", "SSL"] -AuthMap = NamedTuple("AuthMap", protocol=AuthProtocol, mechanism=AuthMechanism) - class NodePortManager: """Manager for handling mongos Kubernetes resources for a single mongos pod.""" @@ -42,11 +37,6 @@ def __init__( self.app_name = "-".join(pod_name.split("-")[:-1]) self.namespace = namespace self.nodeport_service_name = f"{self.app_name}-nodeport" - self.short_auth_mechanism_mapping: dict[AuthMechanism, str] = { - "SCRAM-SHA-512": "scram", - "OAUTHBEARER": "oauth", - "SSL": "ssl", - } @cached_property def client(self) -> Client: @@ -68,69 +58,34 @@ def get_pod(self, pod_name: str = "") -> Pod: name=self.pod_name, ) - def get_service(self, service_name: str) -> Service | None: - """Gets the Service via the K8s API.""" - return self.client.get( - res=Service, - name=service_name, - ) - - def get_node_port( - self, - service: Service, - auth_map: AuthMap, - ) -> int: - """Gets the NodePort number for the service via the K8s API.""" - if not service.spec or not service.spec.ports: - raise Exception("Could not find Service spec or ports") - - for port in service.spec.ports: - if ( - auth_map.protocol.lower().replace("_", "-") in port.name - and 
self.short_auth_mechanism_mapping[auth_map.mechanism] in port.name - ): - return port.nodePort - - raise Exception( - f"Unable to find NodePort using {auth_map.protocol} and {auth_map.mechanism} for the {service} service" - ) - def build_node_port_services(self, port: str) -> Service: """Builds a ClusterIP service for initial client connection.""" pod = self.get_pod(pod_name=self.pod_name) if not pod.metadata: raise Exception(f"Could not find metadata for {pod}") + unit_id = self.charm.unit.name.split("/")[1] return Service( metadata=ObjectMeta( - name=self.nodeport_service_name, + name=f"{self.charm.app.name}-{unit_id}-external", namespace=self.namespace, # owned by the StatefulSet ownerReferences=pod.metadata.ownerReferences, ), spec=ServiceSpec( type="NodePort", - selector={"app.kubernetes.io/name": self.app_name}, + selector={"app.kubernetes.io/name": self.pod_name}, ports=[ ServicePort( protocol="TCP", port=port, targetPort=port, - name=f"{self.charm.app.name}-nodeport", + name=f"{self.charm.app.name}", ) ], ), ) - def build_listener_service_name(self, auth_map: AuthMap): - """Builds the Service name for a given auth.protocol and auth.mechanism. 
- - Returns: - String of listener service name - e.g `mongos-0-sasl-plaintext-scram`, `mongos-12-sasl-ssl-oauth` - """ - return f"{self.pod_name}-{auth_map.protocol.lower().replace('_','-')}-{self.short_auth_mechanism_mapping[auth_map.mechanism]}" - def apply_service(self, service: Service) -> None: """Applies a given Service.""" try: diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 9bb04f66..2bf0ff2d 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -33,6 +33,11 @@ async def test_build_and_deploy(ops_test: OpsTest): resources=resources, application_name=MONGOS_APP_NAME, series="jammy", + num_units=2, + ) + + await ops_test.model.wait_for_idle( + apps=[MONGOS_APP_NAME], timeout=1000, idle_period=30 ) @@ -54,14 +59,17 @@ async def test_waits_for_config_server(ops_test: OpsTest) -> None: @pytest.mark.abort_on_fail async def test_mongos_external_connections(ops_test: OpsTest) -> None: """Tests that mongos is accessible externally.""" - assert_node_port_available(ops_test, node_port_name="mongos-k8s-nodeport") + for unit_id in range(len(ops_test.model.applications[MONGOS_APP_NAME].units)): + assert_node_port_available( + ops_test, node_port_name=f"{MONGOS_APP_NAME}-{unit_id}-external" + ) - # TODO add this in once DPE-5040 / PR #20 merges - # exposed_node_port = get_port_from_node_port(ops_test, node_port_name="mongos-k8s-nodeport") - # public_k8s_ip = get_public_k8s_ip() - # username, password = await get_mongos_user_password(ops_test, MONGOS_APP_NAME) - # external_mongos_client = MongoClient( - # f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}" - # ) - # external_mongos_client.admin.command("usersInfo") - # external_mongos_client.close() + # TODO add this in once DPE-5040 / PR #20 merges + # exposed_node_port = get_port_from_node_port(ops_test, node_port_name="mongos-k8s-nodeport") + # public_k8s_ip = get_public_k8s_ip() + # username, password = await 
get_mongos_user_password(ops_test, MONGOS_APP_NAME) + # external_mongos_client = MongoClient( + # f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}" + # ) + # external_mongos_client.admin.command("usersInfo") + # external_mongos_client.close() From a999ac8a65270c472cff1d1e70d97c7e415d3f22 Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Thu, 15 Aug 2024 16:00:35 +0000 Subject: [PATCH 05/19] PR feedback #2 --- src/charm.py | 4 +--- src/node_port.py | 28 +++++++++++++++++----------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/src/charm.py b/src/charm.py index 98baee9f..49c88619 100755 --- a/src/charm.py +++ b/src/charm.py @@ -62,9 +62,7 @@ def __init__(self, *args): self.secrets = SecretCache(self) self.status = MongosStatusHandler(self) - self.node_port_manager = NodePortManager( - self, pod_name=self.unit.name.replace("/", "-"), namespace=self.model.name - ) + self.node_port_manager = NodePortManager(self) # BEGIN: hook functions def _on_mongos_pebble_ready(self, event) -> None: diff --git a/src/node_port.py b/src/node_port.py index 56b47ab9..56873849 100644 --- a/src/node_port.py +++ b/src/node_port.py @@ -7,7 +7,7 @@ import logging from functools import cached_property from ops.charm import CharmBase -from lightkube.models.meta_v1 import ObjectMeta +from lightkube.models.meta_v1 import ObjectMeta, OwnerReference from lightkube.core.client import Client from lightkube.core.exceptions import ApiError from lightkube.resources.core_v1 import Pod, Service @@ -29,14 +29,11 @@ class NodePortManager: def __init__( self, charm: CharmBase, - pod_name: str, - namespace: str, ): self.charm = charm - self.pod_name = pod_name - self.app_name = "-".join(pod_name.split("-")[:-1]) - self.namespace = namespace - self.nodeport_service_name = f"{self.app_name}-nodeport" + self.pod_name = self.charm.unit.name.replace("/", "-") + self.app_name = self.charm.app.name + self.namespace = self.charm.model.name @cached_property def client(self) -> 
Client: @@ -67,10 +64,19 @@ def build_node_port_services(self, port: str) -> Service: unit_id = self.charm.unit.name.split("/")[1] return Service( metadata=ObjectMeta( - name=f"{self.charm.app.name}-{unit_id}-external", + name=f"{self.app_name}-{unit_id}-external", namespace=self.namespace, - # owned by the StatefulSet - ownerReferences=pod.metadata.ownerReferences, + # When we scale-down K8s will keep the Services for the deleted 2 units around, + # unless the Services' owner is also deleted. + ownerReferences=[ + OwnerReference( + apiVersion=pod.apiVersion, + kind=pod.kind, + name=self.pod_name, + uid=pod.metadata.uid, + blockOwnerDeletion=False, + ) + ], ), spec=ServiceSpec( type="NodePort", @@ -80,7 +86,7 @@ def build_node_port_services(self, port: str) -> Service: protocol="TCP", port=port, targetPort=port, - name=f"{self.charm.app.name}", + name=f"{self.app_name}", ) ], ), From 4df596bc339309c1dc8179308d23606e6b842db6 Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Fri, 16 Aug 2024 09:39:29 +0000 Subject: [PATCH 06/19] additional PR changes --- src/node_port.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/node_port.py b/src/node_port.py index 56873849..b868b9cf 100644 --- a/src/node_port.py +++ b/src/node_port.py @@ -66,7 +66,7 @@ def build_node_port_services(self, port: str) -> Service: metadata=ObjectMeta( name=f"{self.app_name}-{unit_id}-external", namespace=self.namespace, - # When we scale-down K8s will keep the Services for the deleted 2 units around, + # When we scale-down K8s will keep the Services for the deleted units around, # unless the Services' owner is also deleted. 
ownerReferences=[ OwnerReference( @@ -86,7 +86,7 @@ def build_node_port_services(self, port: str) -> Service: protocol="TCP", port=port, targetPort=port, - name=f"{self.app_name}", + name=f"{self.pod_name}-port", ) ], ), From 9a7ec30e0af66c411aa5f8535176eef6ca6def54 Mon Sep 17 00:00:00 2001 From: Mia Altieri <32723809+MiaAltieri@users.noreply.github.com> Date: Fri, 16 Aug 2024 17:18:45 +0200 Subject: [PATCH 07/19] Update src/node_port.py Co-authored-by: Mehdi Bendriss --- src/node_port.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/node_port.py b/src/node_port.py index b868b9cf..6798f0d4 100644 --- a/src/node_port.py +++ b/src/node_port.py @@ -48,11 +48,9 @@ def client(self) -> Client: def get_pod(self, pod_name: str = "") -> Pod: """Gets the Pod via the K8s API.""" # Allows us to get pods from other peer units - pod_name = pod_name or self.pod_name - return self.client.get( res=Pod, - name=self.pod_name, + name=pod_name or self.pod_name, ) def build_node_port_services(self, port: str) -> Service: From 00c6efa910e6198c596fc9fbca686b8772c9d46f Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Fri, 16 Aug 2024 15:25:31 +0000 Subject: [PATCH 08/19] pr feedback --- src/charm.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/charm.py b/src/charm.py index 49c88619..c91b4236 100755 --- a/src/charm.py +++ b/src/charm.py @@ -143,7 +143,9 @@ def update_external_services(self) -> None: def is_integrated_to_config_server(self) -> bool: """Returns True if the mongos application is integrated to a config-server.""" - return len(self.model.relations[Config.Relations.CLUSTER_RELATIONS_NAME]) + return ( + self.model.get_relation(Config.Relations.CLUSTER_RELATIONS_NAME) is not None + ) def _get_mongos_config_for_user( self, user: MongoDBUser, hosts: Set[str] From 30becd8a39f3189387591676be3b5d9fdc521984 Mon Sep 17 00:00:00 2001 From: Mia Altieri <32723809+MiaAltieri@users.noreply.github.com> Date: Mon, 19 Aug 2024 16:43:08 +0200 
Subject: [PATCH 09/19] Update src/charm.py Co-authored-by: Mehdi Bendriss --- actions.yaml | 11 + config.yaml | 7 + .../data_platform_libs/v0/data_interfaces.py | 3739 +++++++++++++++++ .../mongodb/v0/config_server_interface.py | 494 +++ lib/charms/mongodb/v0/mongodb_tls.py | 365 ++ lib/charms/mongodb/v1/helpers.py | 322 ++ .../v3/tls_certificates.py | 2009 +++++++++ poetry.lock | 16 +- pyproject.toml | 1 + src/charm.py | 239 +- src/config.py | 21 + .../integration/client_relations/__init__.py | 2 + tests/integration/client_relations/helpers.py | 64 + .../test_external_client_relations.py | 57 + tests/integration/helpers.py | 249 +- tests/integration/test_charm.py | 135 +- 16 files changed, 7583 insertions(+), 148 deletions(-) create mode 100644 actions.yaml create mode 100644 config.yaml create mode 100644 lib/charms/data_platform_libs/v0/data_interfaces.py create mode 100644 lib/charms/mongodb/v0/config_server_interface.py create mode 100644 lib/charms/mongodb/v0/mongodb_tls.py create mode 100644 lib/charms/mongodb/v1/helpers.py create mode 100644 lib/charms/tls_certificates_interface/v3/tls_certificates.py create mode 100644 tests/integration/client_relations/__init__.py create mode 100644 tests/integration/client_relations/helpers.py create mode 100644 tests/integration/client_relations/test_external_client_relations.py diff --git a/actions.yaml b/actions.yaml new file mode 100644 index 00000000..409689ac --- /dev/null +++ b/actions.yaml @@ -0,0 +1,11 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +set-tls-private-key: + description: Set the privates key, which will be used for certificate signing requests (CSR). Run for each unit separately. + params: + external-key: + type: string + description: The content of private key for external communications with clients. Content will be auto-generated if this option is not specified. 
+ internal-key: + type: string + description: The content of private key for internal communications with clients. Content will be auto-generated if this option is not specified. diff --git a/config.yaml b/config.yaml new file mode 100644 index 00000000..37c35da9 --- /dev/null +++ b/config.yaml @@ -0,0 +1,7 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. +options: + expose-external: + description: "String to determine how to expose the mongos router externally from the Kubernetes cluster. Possible values: 'nodeport', 'none'" + type: string + default: "none" diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py new file mode 100644 index 00000000..aaed2e52 --- /dev/null +++ b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -0,0 +1,3739 @@ +# Copyright 2023 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Library to manage the relation for the data-platform products. + +This library contains the Requires and Provides classes for handling the relation +between an application and multiple managed application supported by the data-team: +MySQL, Postgresql, MongoDB, Redis, and Kafka. 
+ +### Database (MySQL, Postgresql, MongoDB, and Redis) + +#### Requires Charm +This library is a uniform interface to a selection of common database +metadata, with added custom events that add convenience to database management, +and methods to consume the application related data. + + +Following an example of using the DatabaseCreatedEvent, in the context of the +application charm code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Charm events defined in the database requires charm library. + self.database = DatabaseRequires(self, relation_name="database", database_name="database") + self.framework.observe(self.database.on.database_created, self._on_database_created) + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + + # Start application with rendered configuration + self._start_application(config_file) + + # Set active status + self.unit.status = ActiveStatus("received database credentials") +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- database_created: event emitted when the requested database is created. +- endpoints_changed: event emitted when the read/write endpoints of the database have changed. +- read_only_endpoints_changed: event emitted when the read-only endpoints of the database + have changed. Event is not triggered if read/write endpoints changed too. 
+ +If it is needed to connect multiple database clusters to the same relation endpoint +the application charm can implement the same code as if it would connect to only +one database cluster (like the above code example). + +To differentiate multiple clusters connected to the same relation endpoint +the application charm can use the name of the remote application: + +```python + +def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Get the remote app name of the cluster that triggered this event + cluster = event.relation.app.name +``` + +It is also possible to provide an alias for each different database cluster/relation. + +So, it is possible to differentiate the clusters in two ways. +The first is to use the remote application name, i.e., `event.relation.app.name`, as above. + +The second way is to use different event handlers to handle each cluster events. +The implementation would be something like the following code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Define the cluster aliases and one handler for each cluster database created event. + self.database = DatabaseRequires( + self, + relation_name="database", + database_name="database", + relations_aliases = ["cluster1", "cluster2"], + ) + self.framework.observe( + self.database.on.cluster1_database_created, self._on_cluster1_database_created + ) + self.framework.observe( + self.database.on.cluster2_database_created, self._on_cluster2_database_created + ) + + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster1 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... 
+ + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster2 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + +``` + +When it's needed to check whether a plugin (extension) is enabled on the PostgreSQL +charm, you can use the is_postgresql_plugin_enabled method. To use that, you need to +add the following dependency to your charmcraft.yaml file: + +```yaml + +parts: + charm: + charm-binary-python-packages: + - psycopg[binary] + +``` + +### Provider Charm + +Following an example of using the DatabaseRequestedEvent, in the context of the +database charm code: + +```python +from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides + +class SampleCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + # Charm events defined in the database provides charm library. + self.provided_database = DatabaseProvides(self, relation_name="database") + self.framework.observe(self.provided_database.on.database_requested, + self._on_database_requested) + # Database generic helper + self.database = DatabaseHelper() + + def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: + # Handle the event triggered by a new database requested in the relation + # Retrieve the database name using the charm library. + db_name = event.database + # generate a new user credential + username = self.database.generate_user() + password = self.database.generate_password() + # set the credentials for the relation + self.provided_database.set_credentials(event.relation.id, username, password) + # set other variables for the relation event.set_tls("False") +``` +As shown above, the library provides a custom event (database_requested) to handle +the situation when an application charm requests a new database to be created. 
+It's preferred to subscribe to this event instead of relation changed event to avoid +creating a new database when other information other than a database name is +exchanged in the relation databag. + +### Kafka + +This library is the interface to use and interact with the Kafka charm. This library contains +custom events that add convenience to manage Kafka, and provides methods to consume the +application related data. + +#### Requirer Charm + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + BootstrapServerChangedEvent, + KafkaRequires, + TopicCreatedEvent, +) + +class ApplicationCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.kafka = KafkaRequires(self, "kafka_client", "test-topic") + self.framework.observe( + self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed + ) + self.framework.observe( + self.kafka.on.topic_created, self._on_kafka_topic_created + ) + + def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent): + # Event triggered when a bootstrap server was changed for this application + + new_bootstrap_server = event.bootstrap_server + ... + + def _on_kafka_topic_created(self, event: TopicCreatedEvent): + # Event triggered when a topic was created for this application + username = event.username + password = event.password + tls = event.tls + tls_ca= event.tls_ca + bootstrap_server event.bootstrap_server + consumer_group_prefic = event.consumer_group_prefix + zookeeper_uris = event.zookeeper_uris + ... + +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- topic_created: event emitted when the requested topic is created. +- bootstrap_server_changed: event emitted when the bootstrap server have changed. +- credential_changed: event emitted when the credentials of Kafka changed. + +### Provider Charm + +Following the previous example, this is an example of the provider charm. 
+ +```python +class SampleCharm(CharmBase): + +from charms.data_platform_libs.v0.data_interfaces import ( + KafkaProvides, + TopicRequestedEvent, +) + + def __init__(self, *args): + super().__init__(*args) + + # Default charm events. + self.framework.observe(self.on.start, self._on_start) + + # Charm events defined in the Kafka Provides charm library. + self.kafka_provider = KafkaProvides(self, relation_name="kafka_client") + self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested) + # Kafka generic helper + self.kafka = KafkaHelper() + + def _on_topic_requested(self, event: TopicRequestedEvent): + # Handle the on_topic_requested event. + + topic = event.topic + relation_id = event.relation.id + # set connection info in the databag relation + self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server()) + self.kafka_provider.set_credentials(relation_id, username=username, password=password) + self.kafka_provider.set_consumer_group_prefix(relation_id, ...) + self.kafka_provider.set_tls(relation_id, "False") + self.kafka_provider.set_zookeeper_uris(relation_id, ...) + +``` +As shown above, the library provides a custom event (topic_requested) to handle +the situation when an application charm requests a new topic to be created. +It is preferred to subscribe to this event instead of relation changed event to avoid +creating a new topic when other information other than a topic name is +exchanged in the relation databag. 
+""" + +import copy +import json +import logging +from abc import ABC, abstractmethod +from collections import UserDict, namedtuple +from datetime import datetime +from enum import Enum +from typing import ( + Callable, + Dict, + ItemsView, + KeysView, + List, + Optional, + Set, + Tuple, + Union, + ValuesView, +) + +from ops import JujuVersion, Model, Secret, SecretInfo, SecretNotFoundError +from ops.charm import ( + CharmBase, + CharmEvents, + RelationChangedEvent, + RelationCreatedEvent, + RelationEvent, + SecretChangedEvent, +) +from ops.framework import EventSource, Object +from ops.model import Application, ModelError, Relation, Unit + +# The unique Charmhub library identifier, never change it +LIBID = "6c3e6b6680d64e9c89e611d1a15f65be" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 39 + +PYDEPS = ["ops>=2.0.0"] + +# Starting from what LIBPATCH number to apply legacy solutions +# v0.17 was the last version without secrets +LEGACY_SUPPORT_FROM = 17 + +logger = logging.getLogger(__name__) + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. 
+ +added - keys that were added +changed - keys that still exist but have new values +deleted - key that were deleted""" + + +PROV_SECRET_PREFIX = "secret-" +REQ_SECRET_FIELDS = "requested-secrets" +GROUP_MAPPING_FIELD = "secret_group_mapping" +GROUP_SEPARATOR = "@" + +MODEL_ERRORS = { + "not_leader": "this unit is not the leader", + "no_label_and_uri": "ERROR either URI or label should be used for getting an owned secret but not both", + "owner_no_refresh": "ERROR secret owner cannot use --refresh", +} + + +############################################################################## +# Exceptions +############################################################################## + + +class DataInterfacesError(Exception): + """Common ancestor for DataInterfaces related exceptions.""" + + +class SecretError(DataInterfacesError): + """Common ancestor for Secrets related exceptions.""" + + +class SecretAlreadyExistsError(SecretError): + """A secret that was to be added already exists.""" + + +class SecretsUnavailableError(SecretError): + """Secrets aren't yet available for Juju version used.""" + + +class SecretsIllegalUpdateError(SecretError): + """Secrets aren't yet available for Juju version used.""" + + +class IllegalOperationError(DataInterfacesError): + """To be used when an operation is not allowed to be performed.""" + + +############################################################################## +# Global helpers / utilities +############################################################################## + +############################################################################## +# Databag handling and comparison methods +############################################################################## + + +def get_encoded_dict( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[Dict[str, str]]: + """Retrieve and decode an encoded field from relation data.""" + data = json.loads(relation.data[member].get(field, "{}")) + if 
isinstance(data, dict): + return data + logger.error("Unexpected datatype for %s instead of dict.", str(data)) + + +def get_encoded_list( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[List[str]]: + """Retrieve and decode an encoded field from relation data.""" + data = json.loads(relation.data[member].get(field, "[]")) + if isinstance(data, list): + return data + logger.error("Unexpected datatype for %s instead of list.", str(data)) + + +def set_encoded_field( + relation: Relation, + member: Union[Unit, Application], + field: str, + value: Union[str, list, Dict[str, str]], +) -> None: + """Set an encoded field from relation data.""" + relation.data[member].update({field: json.dumps(value)}) + + +def diff(event: RelationChangedEvent, bucket: Optional[Union[Unit, Application]]) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + bucket: bucket of the databag (app or unit) + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the application relation databag. + if not bucket: + return Diff([], [], []) + + old_data = get_encoded_dict(event.relation, bucket, "data") + + if not old_data: + old_data = {} + + # Retrieve the new data from the event relation databag. + new_data = ( + {key: value for key, value in event.relation.data[event.app].items() if key != "data"} + if event.app + else {} + ) + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() # pyright: ignore [reportAssignmentType] + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() # pyright: ignore [reportAssignmentType] + # These are the keys that already existed in the databag, + # but had their values changed. 
+ changed = { + key + for key in old_data.keys() & new_data.keys() # pyright: ignore [reportAssignmentType] + if old_data[key] != new_data[key] # pyright: ignore [reportAssignmentType] + } + # Convert the new_data to a serializable format and save it for a next diff check. + set_encoded_field(event.relation, bucket, "data", new_data) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + +############################################################################## +# Module decorators +############################################################################## + + +def leader_only(f): + """Decorator to ensure that only leader can perform given operation.""" + + def wrapper(self, *args, **kwargs): + if self.component == self.local_app and not self.local_unit.is_leader(): + logger.error( + "This operation (%s()) can only be performed by the leader unit", f.__name__ + ) + return + return f(self, *args, **kwargs) + + wrapper.leader_only = True + return wrapper + + +def juju_secrets_only(f): + """Decorator to ensure that certain operations would be only executed on Juju3.""" + + def wrapper(self, *args, **kwargs): + if not self.secrets_enabled: + raise SecretsUnavailableError("Secrets unavailable on current Juju version") + return f(self, *args, **kwargs) + + return wrapper + + +def dynamic_secrets_only(f): + """Decorator to ensure that certain operations would be only executed when NO static secrets are defined.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields: + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." 
+ ) + return f(self, *args, **kwargs) + + return wrapper + + +def either_static_or_dynamic_secrets(f): + """Decorator to ensure that static and dynamic secrets won't be used in parallel.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields and set(self.current_secret_fields) - set( + self.static_secret_fields + ): + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." + ) + return f(self, *args, **kwargs) + + return wrapper + + +def legacy_apply_from_version(version: int) -> Callable: + """Decorator to decide whether to apply a legacy function or not. + + Based on LEGACY_SUPPORT_FROM module variable value, the importer charm may only want + to apply legacy solutions starting from a specific LIBPATCH. + + NOTE: All 'legacy' functions have to be defined and called in a way that they return `None`. + This results in cleaner and more secure execution flows in case the function may be disabled. + This requirement implicitly means that legacy functions change the internal state strictly, + don't return information. 
+ """ + + def decorator(f: Callable[..., None]): + """Signature is ensuring None return value.""" + f.legacy_version = version + + def wrapper(self, *args, **kwargs) -> None: + if version >= LEGACY_SUPPORT_FROM: + return f(self, *args, **kwargs) + + return wrapper + + return decorator + + +############################################################################## +# Helper classes +############################################################################## + + +class Scope(Enum): + """Peer relations scope.""" + + APP = "app" + UNIT = "unit" + + +class SecretGroup(str): + """Secret groups specific type.""" + + +class SecretGroupsAggregate(str): + """Secret groups with option to extend with additional constants.""" + + def __init__(self): + self.USER = SecretGroup("user") + self.TLS = SecretGroup("tls") + self.EXTRA = SecretGroup("extra") + + def __setattr__(self, name, value): + """Setting internal constants.""" + if name in self.__dict__: + raise RuntimeError("Can't set constant!") + else: + super().__setattr__(name, SecretGroup(value)) + + def groups(self) -> list: + """Return the list of stored SecretGroups.""" + return list(self.__dict__.values()) + + def get_group(self, group: str) -> Optional[SecretGroup]: + """If the input str translates to a group name, return that.""" + return SecretGroup(group) if group in self.groups() else None + + +SECRET_GROUPS = SecretGroupsAggregate() + + +class CachedSecret: + """Locally cache a secret. 
+ + The data structure is precisely re-using/simulating as in the actual Secret Storage + """ + + KNOWN_MODEL_ERRORS = [MODEL_ERRORS["no_label_and_uri"], MODEL_ERRORS["owner_no_refresh"]] + + def __init__( + self, + model: Model, + component: Union[Application, Unit], + label: str, + secret_uri: Optional[str] = None, + legacy_labels: List[str] = [], + ): + self._secret_meta = None + self._secret_content = {} + self._secret_uri = secret_uri + self.label = label + self._model = model + self.component = component + self.legacy_labels = legacy_labels + self.current_label = None + + @property + def meta(self) -> Optional[Secret]: + """Getting cached secret meta-information.""" + if not self._secret_meta: + if not (self._secret_uri or self.label): + return + + try: + self._secret_meta = self._model.get_secret(label=self.label) + except SecretNotFoundError: + # Falling back to seeking for potential legacy labels + self._legacy_compat_find_secret_by_old_label() + + # If still not found, to be checked by URI, to be labelled with the proposed label + if not self._secret_meta and self._secret_uri: + self._secret_meta = self._model.get_secret(id=self._secret_uri, label=self.label) + return self._secret_meta + + ########################################################################## + # Backwards compatibility / Upgrades + ########################################################################## + # These functions are used to keep backwards compatibility on rolling upgrades + # Policy: + # All data is kept intact until the first write operation. (This allows a minimal + # grace period during which rollbacks are fully safe. For more info see the spec.) + # All data involves: + # - databag contents + # - secrets content + # - secret labels (!!!) 
+ # Legacy functions must return None, and leave an equally consistent state whether + # they are executed or skipped (as a high enough versioned execution environment may + # not require so) + + # Compatibility + + @legacy_apply_from_version(34) + def _legacy_compat_find_secret_by_old_label(self) -> None: + """Compatibility function, allowing to find a secret by a legacy label. + + This functionality is typically needed when secret labels changed over an upgrade. + Until the first write operation, we need to maintain data as it was, including keeping + the old secret label. In order to keep track of the old label currently used to access + the secret, and additional 'current_label' field is being defined. + """ + for label in self.legacy_labels: + try: + self._secret_meta = self._model.get_secret(label=label) + except SecretNotFoundError: + pass + else: + if label != self.label: + self.current_label = label + return + + # Migrations + + @legacy_apply_from_version(34) + def _legacy_migration_to_new_label_if_needed(self) -> None: + """Helper function to re-create the secret with a different label. + + Juju does not provide a way to change secret labels. + Thus whenever moving from secrets version that involves secret label changes, + we "re-create" the existing secret, and attach the new label to the new + secret, to be used from then on. + + Note: we replace the old secret with a new one "in place", as we can't + easily switch the containing SecretCache structure to point to a new secret. + Instead we are changing the 'self' (CachedSecret) object to point to the + new instance. + """ + if not self.current_label or not (self.meta and self._secret_meta): + return + + # Create a new secret with the new label + content = self._secret_meta.get_content() + self._secret_uri = None + + # It will be nice to have the possibility to check if we are the owners of the secret... 
+ try: + self._secret_meta = self.add_secret(content, label=self.label) + except ModelError as err: + if MODEL_ERRORS["not_leader"] not in str(err): + raise + self.current_label = None + + ########################################################################## + # Public functions + ########################################################################## + + def add_secret( + self, + content: Dict[str, str], + relation: Optional[Relation] = None, + label: Optional[str] = None, + ) -> Secret: + """Create a new secret.""" + if self._secret_uri: + raise SecretAlreadyExistsError( + "Secret is already defined with uri %s", self._secret_uri + ) + + label = self.label if not label else label + + secret = self.component.add_secret(content, label=label) + if relation and relation.app != self._model.app: + # If it's not a peer relation, grant is to be applied + secret.grant(relation) + self._secret_uri = secret.id + self._secret_meta = secret + return self._secret_meta + + def get_content(self) -> Dict[str, str]: + """Getting cached secret content.""" + if not self._secret_content: + if self.meta: + try: + self._secret_content = self.meta.get_content(refresh=True) + except (ValueError, ModelError) as err: + # https://bugs.launchpad.net/juju/+bug/2042596 + # Only triggered when 'refresh' is set + if isinstance(err, ModelError) and not any( + msg in str(err) for msg in self.KNOWN_MODEL_ERRORS + ): + raise + # Due to: ValueError: Secret owner cannot use refresh=True + self._secret_content = self.meta.get_content() + return self._secret_content + + def set_content(self, content: Dict[str, str]) -> None: + """Setting cached secret content.""" + if not self.meta: + return + + # DPE-4182: do not create new revision if the content stay the same + if content == self.get_content(): + return + + if content: + self._legacy_migration_to_new_label_if_needed() + self.meta.set_content(content) + self._secret_content = content + else: + self.meta.remove_all_revisions() + + def 
get_info(self) -> Optional[SecretInfo]: + """Wrapper function to apply the corresponding call on the Secret object within CachedSecret if any.""" + if self.meta: + return self.meta.get_info() + + def remove(self) -> None: + """Remove secret.""" + if not self.meta: + raise SecretsUnavailableError("Non-existent secret was attempted to be removed.") + try: + self.meta.remove_all_revisions() + except SecretNotFoundError: + pass + self._secret_content = {} + self._secret_meta = None + self._secret_uri = None + + +class SecretCache: + """A data structure storing CachedSecret objects.""" + + def __init__(self, model: Model, component: Union[Application, Unit]): + self._model = model + self.component = component + self._secrets: Dict[str, CachedSecret] = {} + + def get( + self, label: str, uri: Optional[str] = None, legacy_labels: List[str] = [] + ) -> Optional[CachedSecret]: + """Getting a secret from Juju Secret store or cache.""" + if not self._secrets.get(label): + secret = CachedSecret( + self._model, self.component, label, uri, legacy_labels=legacy_labels + ) + if secret.meta: + self._secrets[label] = secret + return self._secrets.get(label) + + def add(self, label: str, content: Dict[str, str], relation: Relation) -> CachedSecret: + """Adding a secret to Juju Secret.""" + if self._secrets.get(label): + raise SecretAlreadyExistsError(f"Secret {label} already exists") + + secret = CachedSecret(self._model, self.component, label) + secret.add_secret(content, relation) + self._secrets[label] = secret + return self._secrets[label] + + def remove(self, label: str) -> None: + """Remove a secret from the cache.""" + if secret := self.get(label): + try: + secret.remove() + self._secrets.pop(label) + except (SecretsUnavailableError, KeyError): + pass + else: + return + logging.debug("Non-existing Juju Secret was attempted to be removed %s", label) + + +################################################################################ +# Relation Data base/abstract ancestors 
(i.e. parent classes) +################################################################################ + + +# Base Data + + +class DataDict(UserDict): + """Python Standard Library 'dict' - like representation of Relation Data.""" + + def __init__(self, relation_data: "Data", relation_id: int): + self.relation_data = relation_data + self.relation_id = relation_id + + @property + def data(self) -> Dict[str, str]: + """Return the full content of the Abstract Relation Data dictionary.""" + result = self.relation_data.fetch_my_relation_data([self.relation_id]) + try: + result_remote = self.relation_data.fetch_relation_data([self.relation_id]) + except NotImplementedError: + result_remote = {self.relation_id: {}} + if result: + result_remote[self.relation_id].update(result[self.relation_id]) + return result_remote.get(self.relation_id, {}) + + def __setitem__(self, key: str, item: str) -> None: + """Set an item of the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, {key: item}) + + def __getitem__(self, key: str) -> str: + """Get an item of the Abstract Relation Data dictionary.""" + result = None + + # Avoiding "leader_only" error when cross-charm non-leader unit, not to report useless error + if ( + not hasattr(self.relation_data.fetch_my_relation_field, "leader_only") + or self.relation_data.component != self.relation_data.local_app + or self.relation_data.local_unit.is_leader() + ): + result = self.relation_data.fetch_my_relation_field(self.relation_id, key) + + if not result: + try: + result = self.relation_data.fetch_relation_field(self.relation_id, key) + except NotImplementedError: + pass + + if not result: + raise KeyError + return result + + def __eq__(self, d: dict) -> bool: + """Equality.""" + return self.data == d + + def __repr__(self) -> str: + """String representation Abstract Relation Data dictionary.""" + return repr(self.data) + + def __len__(self) -> int: + """Length of the Abstract Relation Data 
dictionary.""" + return len(self.data) + + def __delitem__(self, key: str) -> None: + """Delete an item of the Abstract Relation Data dictionary.""" + self.relation_data.delete_relation_data(self.relation_id, [key]) + + def has_key(self, key: str) -> bool: + """Does the key exist in the Abstract Relation Data dictionary?""" + return key in self.data + + def update(self, items: Dict[str, str]): + """Update the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, items) + + def keys(self) -> KeysView[str]: + """Keys of the Abstract Relation Data dictionary.""" + return self.data.keys() + + def values(self) -> ValuesView[str]: + """Values of the Abstract Relation Data dictionary.""" + return self.data.values() + + def items(self) -> ItemsView[str, str]: + """Items of the Abstract Relation Data dictionary.""" + return self.data.items() + + def pop(self, item: str) -> str: + """Pop an item of the Abstract Relation Data dictionary.""" + result = self.relation_data.fetch_my_relation_field(self.relation_id, item) + if not result: + raise KeyError(f"Item {item} doesn't exist.") + self.relation_data.delete_relation_data(self.relation_id, [item]) + return result + + def __contains__(self, item: str) -> bool: + """Does the Abstract Relation Data dictionary contain item?""" + return item in self.data.values() + + def __iter__(self): + """Iterate through the Abstract Relation Data dictionary.""" + return iter(self.data) + + def get(self, key: str, default: Optional[str] = None) -> Optional[str]: + """Safely get an item of the Abstract Relation Data dictionary.""" + try: + if result := self[key]: + return result + except KeyError: + return default + + +class Data(ABC): + """Base relation data mainpulation (abstract) class.""" + + SCOPE = Scope.APP + + # Local map to associate mappings with secrets potentially as a group + SECRET_LABEL_MAP = { + "username": SECRET_GROUPS.USER, + "password": SECRET_GROUPS.USER, + "uris": 
SECRET_GROUPS.USER, + "tls": SECRET_GROUPS.TLS, + "tls-ca": SECRET_GROUPS.TLS, + } + + def __init__( + self, + model: Model, + relation_name: str, + ) -> None: + self._model = model + self.local_app = self._model.app + self.local_unit = self._model.unit + self.relation_name = relation_name + self._jujuversion = None + self.component = self.local_app if self.SCOPE == Scope.APP else self.local_unit + self.secrets = SecretCache(self._model, self.component) + self.data_component = None + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return [ + relation + for relation in self._model.relations[self.relation_name] + if self._is_relation_active(relation) + ] + + @property + def secrets_enabled(self): + """Is this Juju version allowing for Secrets usage?""" + if not self._jujuversion: + self._jujuversion = JujuVersion.from_environ() + return self._jujuversion.has_secrets + + @property + def secret_label_map(self): + """Exposing secret-label map via a property -- could be overridden in descendants!""" + return self.SECRET_LABEL_MAP + + # Mandatory overrides for internal/helper methods + + @abstractmethod + def _get_relation_secret( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + raise NotImplementedError + + @abstractmethod + def _fetch_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. secrets) from the relation.""" + raise NotImplementedError + + @abstractmethod + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. 
secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + @abstractmethod + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Update data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + @abstractmethod + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + # Optional overrides + + def _legacy_apply_on_fetch(self) -> None: + """This function should provide a list of compatibility functions to be applied when fetching (legacy) data.""" + pass + + def _legacy_apply_on_update(self, fields: List[str]) -> None: + """This function should provide a list of compatibility functions to be applied when writing data. + + Since data may be at a legacy version, migration may be mandatory. + """ + pass + + def _legacy_apply_on_delete(self, fields: List[str]) -> None: + """This function should provide a list of compatibility functions to be applied when deleting (legacy) data.""" + pass + + # Internal helper methods + + @staticmethod + def _is_relation_active(relation: Relation): + """Whether the relation is active based on contained data.""" + try: + _ = repr(relation.data) + return True + except (RuntimeError, ModelError): + return False + + @staticmethod + def _is_secret_field(field: str) -> bool: + """Is the field in question a secret reference (URI) field or not?""" + return field.startswith(PROV_SECRET_PREFIX) + + @staticmethod + def _generate_secret_label( + relation_name: str, relation_id: int, group_mapping: SecretGroup + ) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{relation_name}.{relation_id}.{group_mapping}.secret" + + def _generate_secret_field_name(self, group_mapping: SecretGroup) -> str: + """Generate unique 
group_mappings for secrets within a relation context.""" + return f"{PROV_SECRET_PREFIX}{group_mapping}" + + def _relation_from_secret_label(self, secret_label: str) -> Optional[Relation]: + """Retrieve the relation that belongs to a secret label.""" + contents = secret_label.split(".") + + if not (contents and len(contents) >= 3): + return + + contents.pop() # ".secret" at the end + contents.pop() # Group mapping + relation_id = contents.pop() + try: + relation_id = int(relation_id) + except ValueError: + return + + # In case '.' character appeared in relation name + relation_name = ".".join(contents) + + try: + return self.get_relation(relation_name, relation_id) + except ModelError: + return + + def _group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. + + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! + """ + secret_fieldnames_grouped = {} + for key in secret_fields: + if group := self.secret_label_map.get(key): + secret_fieldnames_grouped.setdefault(group, []).append(key) + else: + secret_fieldnames_grouped.setdefault(SECRET_GROUPS.EXTRA, []).append(key) + return secret_fieldnames_grouped + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Union[Set[str], List[str]] = [], + ) -> Dict[str, str]: + """Helper function to retrieve collective, requested contents of a secret.""" + if (secret := self._get_relation_secret(relation.id, group)) and ( + secret_data := secret.get_content() + ): + return { + k: v for k, v in secret_data.items() if not secret_fields or k in secret_fields + } + return {} + + def _content_for_secret_group( + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + ) -> Dict[str, str]: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: 
+ return { + k: v + for k, v in content.items() + if k in secret_fields and k not in self.secret_label_map.keys() + } + + return { + k: v + for k, v in content.items() + if k in secret_fields and self.secret_label_map.get(k) == group_mapping + } + + @juju_secrets_only + def _get_relation_secret_data( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[Dict[str, str]]: + """Retrieve contents of a Juju Secret that's been stored in the relation databag.""" + secret = self._get_relation_secret(relation_id, group_mapping, relation_name) + if secret: + return secret.get_content() + + # Core operations on Relation Fields manipulations (regardless whether the field is in the databag or in a secret) + # Internal functions to be called directly from transparent public interface functions (+closely related helpers) + + def _process_secret_fields( + self, + relation: Relation, + req_secret_fields: Optional[List[str]], + impacted_rel_fields: List[str], + operation: Callable, + *args, + **kwargs, + ) -> Tuple[Dict[str, str], Set[str]]: + """Isolate target secret fields of manipulation, and execute requested operation by Secret Group.""" + result = {} + + # If the relation started on a databag, we just stay on the databag + # (Rolling upgrades may result in a relation starting on databag, getting secrets enabled on-the-fly) + # self.local_app is sufficient to check (ignored if Requires, never has secrets -- works if Provider) + fallback_to_databag = ( + req_secret_fields + and (self.local_unit == self._model.unit and self.local_unit.is_leader()) + and set(req_secret_fields) & set(relation.data[self.component]) + ) + + normal_fields = set(impacted_rel_fields) + if req_secret_fields and self.secrets_enabled and not fallback_to_databag: + normal_fields = normal_fields - set(req_secret_fields) + secret_fields = set(impacted_rel_fields) - set(normal_fields) + + secret_fieldnames_grouped = 
self._group_secret_fields(list(secret_fields)) + + for group in secret_fieldnames_grouped: + # operation() should return nothing when all goes well + if group_result := operation(relation, group, secret_fields, *args, **kwargs): + # If "meaningful" data was returned, we take it. (Some 'operation'-s only return success/failure.) + if isinstance(group_result, dict): + result.update(group_result) + else: + # If it wasn't found as a secret, let's give it a 2nd chance as "normal" field + # Needed when Juju3 Requires meets Juju2 Provider + normal_fields |= set(secret_fieldnames_grouped[group]) + return (result, normal_fields) + + def _fetch_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetching databag contents when no secrets are involved. + + Since the Provider's databag is the only one holding secrest, we can apply + a simplified workflow to read the Require's side's databag. + This is used typically when the Provider side wants to read the Requires side's data, + or when the Requires side may want to read its own data. + """ + if component not in relation.data or not relation.data[component]: + return {} + + if fields: + return { + k: relation.data[component][k] for k in fields if k in relation.data[component] + } + else: + return dict(relation.data[component]) + + def _fetch_relation_data_with_secrets( + self, + component: Union[Application, Unit], + req_secret_fields: Optional[List[str]], + relation: Relation, + fields: Optional[List[str]] = None, + ) -> Dict[str, str]: + """Fetching databag contents when secrets may be involved. + + This function has internal logic to resolve if a requested field may be "hidden" + within a Relation Secret, or directly available as a databag field. Typically + used to read the Provider side's databag (eigher by the Requires side, or by + Provider side itself). 
+ """ + result = {} + normal_fields = [] + + if not fields: + if component not in relation.data: + return {} + + all_fields = list(relation.data[component].keys()) + normal_fields = [field for field in all_fields if not self._is_secret_field(field)] + fields = normal_fields + req_secret_fields if req_secret_fields else normal_fields + + if fields: + result, normal_fields = self._process_secret_fields( + relation, req_secret_fields, fields, self._get_group_secret_contents + ) + + # Processing "normal" fields. May include leftover from what we couldn't retrieve as a secret. + # (Typically when Juju3 Requires meets Juju2 Provider) + if normal_fields: + result.update( + self._fetch_relation_data_without_secrets(component, relation, list(normal_fields)) + ) + return result + + def _update_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, data: Dict[str, str] + ) -> None: + """Updating databag contents when no secrets are involved.""" + if component not in relation.data or relation.data[component] is None: + return + + if relation: + relation.data[component].update(data) + + def _delete_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, fields: List[str] + ) -> None: + """Remove databag fields 'fields' from Relation.""" + if component not in relation.data or relation.data[component] is None: + return + + for field in fields: + try: + relation.data[component].pop(field) + except KeyError: + logger.debug( + "Non-existing field '%s' was attempted to be removed from the databag (relation ID: %s)", + str(field), + str(relation.id), + ) + pass + + # Public interface methods + # Handling Relation Fields seamlessly, regardless if in databag or a Juju Secret + + def as_dict(self, relation_id: int) -> UserDict: + """Dict behavior representation of the Abstract Data.""" + return DataDict(self, relation_id) + + def get_relation(self, relation_name, relation_id) -> Relation: + """Safe way of 
retrieving a relation.""" + relation = self._model.get_relation(relation_name, relation_id) + + if not relation: + raise DataInterfacesError( + "Relation %s %s couldn't be retrieved", relation_name, relation_id + ) + + return relation + + def get_secret_uri(self, relation: Relation, group: SecretGroup) -> Optional[str]: + """Get the secret URI for the corresponding group.""" + secret_field = self._generate_secret_field_name(group) + return relation.data[self.component].get(secret_field) + + def set_secret_uri(self, relation: Relation, group: SecretGroup, secret_uri: str) -> None: + """Set the secret URI for the corresponding group.""" + secret_field = self._generate_secret_field_name(group) + relation.data[self.component][secret_field] = secret_uri + + def fetch_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Dict[int, Dict[str, str]]: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + Function cannot be used in `*-relation-broken` events and will raise an exception. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation ID). 
+ """ + self._legacy_apply_on_fetch() + + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + + data = {} + for relation in relations: + if not relation_ids or (relation_ids and relation.id in relation_ids): + data[relation.id] = self._fetch_specific_relation_data(relation, fields) + return data + + def fetch_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data.""" + return ( + self.fetch_relation_data([relation_id], [field], relation_name) + .get(relation_id, {}) + .get(field) + ) + + def fetch_my_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Optional[Dict[int, Dict[str, str]]]: + """Fetch data of the 'owner' (or 'this app') side of the relation. + + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + self._legacy_apply_on_fetch() + + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + + data = {} + for relation in relations: + if not relation_ids or relation.id in relation_ids: + data[relation.id] = self._fetch_my_specific_relation_data(relation, fields) + return data + + def fetch_my_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data -- owner side. 
+ + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + if relation_data := self.fetch_my_relation_data([relation_id], [field], relation_name): + return relation_data.get(relation_id, {}).get(field) + + @leader_only + def update_relation_data(self, relation_id: int, data: dict) -> None: + """Update the data within the relation.""" + self._legacy_apply_on_update(list(data.keys())) + + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._update_relation_data(relation, data) + + @leader_only + def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: + """Remove field from the relation.""" + self._legacy_apply_on_delete(fields) + + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._delete_relation_data(relation, fields) + + +class EventHandlers(Object): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: Data, unique_key: str = ""): + """Manager of base client relations.""" + if not unique_key: + unique_key = relation_data.relation_name + super().__init__(charm, unique_key) + + self.charm = charm + self.relation_data = relation_data + + self.framework.observe( + charm.on[self.relation_data.relation_name].relation_changed, + self._on_relation_changed_event, + ) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. 
+ """ + return diff(event, self.relation_data.data_component) + + @abstractmethod + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + +# Base ProviderData and RequiresData + + +class ProviderData(Data): + """Base provides-side of the data products relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + ) -> None: + super().__init__(model, relation_name) + self.data_component = self.local_app + + # Private methods handling secrets + + @juju_secrets_only + def _add_relation_secret( + self, + relation: Relation, + group_mapping: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + uri_to_databag=True, + ) -> bool: + """Add a new Juju Secret that will be registered in the relation databag.""" + if uri_to_databag and self.get_secret_uri(relation, group_mapping): + logging.error("Secret for relation %s already exists, not adding again", relation.id) + return False + + content = self._content_for_secret_group(data, secret_fields, group_mapping) + + label = self._generate_secret_label(self.relation_name, relation.id, group_mapping) + secret = self.secrets.add(label, content, relation) + + # According to lint we may not have a Secret ID + if uri_to_databag and secret.meta and secret.meta.id: + self.set_secret_uri(relation, group_mapping, secret.meta.id) + + # Return the content that was added + return True + + @juju_secrets_only + def _update_relation_secret( + self, + relation: Relation, + group_mapping: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + ) -> bool: + """Update the contents of an existing Juju Secret, referred in the relation databag.""" + secret = self._get_relation_secret(relation.id, group_mapping) + + if not secret: + logging.error("Can't update secret for relation %s", relation.id) + return False + + content = self._content_for_secret_group(data, secret_fields, group_mapping) + + 
old_content = secret.get_content() + full_content = copy.deepcopy(old_content) + full_content.update(content) + secret.set_content(full_content) + + # Return True on success + return True + + def _add_or_update_relation_secrets( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + uri_to_databag=True, + ) -> bool: + """Update contents for Secret group. If the Secret doesn't exist, create it.""" + if self._get_relation_secret(relation.id, group): + return self._update_relation_secret(relation, group, secret_fields, data) + else: + return self._add_relation_secret(relation, group, secret_fields, data, uri_to_databag) + + @juju_secrets_only + def _delete_relation_secret( + self, relation: Relation, group: SecretGroup, secret_fields: List[str], fields: List[str] + ) -> bool: + """Update the contents of an existing Juju Secret, referred in the relation databag.""" + secret = self._get_relation_secret(relation.id, group) + + if not secret: + logging.error("Can't delete secret for relation %s", str(relation.id)) + return False + + old_content = secret.get_content() + new_content = copy.deepcopy(old_content) + for field in fields: + try: + new_content.pop(field) + except KeyError: + logging.debug( + "Non-existing secret was attempted to be removed %s, %s", + str(relation.id), + str(field), + ) + return False + + # Remove secret from the relation if it's fully gone + if not new_content: + field = self._generate_secret_field_name(group) + try: + relation.data[self.component].pop(field) + except KeyError: + pass + label = self._generate_secret_label(self.relation_name, relation.id, group) + self.secrets.remove(label) + else: + secret.set_content(new_content) + + # Return the content that was removed + return True + + # Mandatory internal overrides + + @juju_secrets_only + def _get_relation_secret( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + 
"""Retrieve a Juju Secret that's been stored in the relation databag.""" + if not relation_name: + relation_name = self.relation_name + + label = self._generate_secret_label(relation_name, relation_id, group_mapping) + if secret := self.secrets.get(label): + return secret + + relation = self._model.get_relation(relation_name, relation_id) + if not relation: + return + + if secret_uri := self.get_secret_uri(relation, group_mapping): + return self.secrets.get(label, secret_uri) + + def _fetch_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetching relation data for Provider. + + NOTE: Since all secret fields are in the Provider side of the databag, we don't need to worry about that + """ + if not relation.app: + return {} + + return self._fetch_relation_data_without_secrets(relation.app, relation, fields) + + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> dict: + """Fetching our own relation data.""" + secret_fields = None + if relation.app: + secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + return self._fetch_relation_data_with_secrets( + self.local_app, + secret_fields, + relation, + fields, + ) + + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Set values for fields not caring whether it's a secret or not.""" + req_secret_fields = [] + if relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, + req_secret_fields, + list(data), + self._add_or_update_relation_secrets, + data=data, + ) + + normal_content = {k: v for k, v in data.items() if k in normal_fields} + self._update_relation_data_without_secrets(self.local_app, relation, normal_content) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete fields from the Relation not caring 
whether it's a secret or not.""" + req_secret_fields = [] + if relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, req_secret_fields, fields, self._delete_relation_secret, fields=fields + ) + self._delete_relation_data_without_secrets(self.local_app, relation, list(normal_fields)) + + # Public methods - "native" + + def set_credentials(self, relation_id: int, username: str, password: str) -> None: + """Set credentials. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + username: user that was created. + password: password of the created user. + """ + self.update_relation_data(relation_id, {"username": username, "password": password}) + + def set_tls(self, relation_id: int, tls: str) -> None: + """Set whether TLS is enabled. + + Args: + relation_id: the identifier for a particular relation. + tls: whether tls is enabled (True or False). + """ + self.update_relation_data(relation_id, {"tls": tls}) + + def set_tls_ca(self, relation_id: int, tls_ca: str) -> None: + """Set the TLS CA in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + tls_ca: TLS certification authority. 
+ """ + self.update_relation_data(relation_id, {"tls-ca": tls_ca}) + + # Public functions -- inherited + + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) + + +class RequirerData(Data): + """Requirer-side of the relation.""" + + SECRET_FIELDS = ["username", "password", "tls", "tls-ca", "uris"] + + def __init__( + self, + model, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of base client relations.""" + super().__init__(model, relation_name) + self.extra_user_roles = extra_user_roles + self._secret_fields = list(self.SECRET_FIELDS) + if additional_secret_fields: + self._secret_fields += additional_secret_fields + self.data_component = self.local_unit + + @property + def secret_fields(self) -> Optional[List[str]]: + """Local access to secrets field, in case they are being used.""" + if self.secrets_enabled: + return self._secret_fields + + # Internal helper functions + + def _register_secret_to_relation( + self, relation_name: str, relation_id: int, secret_id: str, group: SecretGroup + ): + """Fetch secrets and apply local label on them. + + [MAGIC HERE] + If we fetch a secret using get_secret(id=, label=), + then will be "stuck" on the Secret object, whenever it may + appear (i.e. as an event attribute, or fetched manually) on future occasions. + + This will allow us to uniquely identify the secret on Provider side (typically on + 'secret-changed' events), and map it to the corresponding relation. 
+ """ + label = self._generate_secret_label(relation_name, relation_id, group) + + # Fetching the Secret's meta information ensuring that it's locally getting registered with + CachedSecret(self._model, self.component, label, secret_id).meta + + def _register_secrets_to_relation(self, relation: Relation, params_name_list: List[str]): + """Make sure that secrets of the provided list are locally 'registered' from the databag. + + More on 'locally registered' magic is described in _register_secret_to_relation() method + """ + if not relation.app: + return + + for group in SECRET_GROUPS.groups(): + secret_field = self._generate_secret_field_name(group) + if secret_field in params_name_list and ( + secret_uri := self.get_secret_uri(relation, group) + ): + self._register_secret_to_relation(relation.name, relation.id, secret_uri, group) + + def _is_resource_created_for_relation(self, relation: Relation) -> bool: + if not relation.app: + return False + + data = self.fetch_relation_data([relation.id], ["username", "password"]).get( + relation.id, {} + ) + return bool(data.get("username")) and bool(data.get("password")) + + # Public functions + + def get_secret_uri(self, relation: Relation, group: SecretGroup) -> Optional[str]: + """Getting relation secret URI for the corresponding Secret Group.""" + secret_field = self._generate_secret_field_name(group) + return relation.data[relation.app].get(secret_field) + + def set_secret_uri(self, relation: Relation, group: SecretGroup, uri: str) -> None: + """Setting relation secret URI is not possible for a Requirer.""" + raise NotImplementedError("Requirer can not change the relation secret URI.") + + def is_resource_created(self, relation_id: Optional[int] = None) -> bool: + """Check if the resource has been created. + + This function can be used to check if the Provider answered with data in the charm code + when outside an event callback. 
+ + Args: + relation_id (int, optional): When provided the check is done only for the relation id + provided, otherwise the check is done for all relations + + Returns: + True or False + + Raises: + IndexError: If relation_id is provided but that relation does not exist + """ + if relation_id is not None: + try: + relation = [relation for relation in self.relations if relation.id == relation_id][ + 0 + ] + return self._is_resource_created_for_relation(relation) + except IndexError: + raise IndexError(f"relation id {relation_id} cannot be accessed") + else: + return ( + all( + self._is_resource_created_for_relation(relation) for relation in self.relations + ) + if self.relations + else False + ) + + # Mandatory internal overrides + + @juju_secrets_only + def _get_relation_secret( + self, relation_id: int, group: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + if not relation_name: + relation_name = self.relation_name + + label = self._generate_secret_label(relation_name, relation_id, group) + return self.secrets.get(label) + + def _fetch_specific_relation_data( + self, relation, fields: Optional[List[str]] = None + ) -> Dict[str, str]: + """Fetching Requirer data -- that may include secrets.""" + if not relation.app: + return {} + return self._fetch_relation_data_with_secrets( + relation.app, self.secret_fields, relation, fields + ) + + def _fetch_my_specific_relation_data(self, relation, fields: Optional[List[str]]) -> dict: + """Fetching our own relation data.""" + return self._fetch_relation_data_without_secrets(self.local_app, relation, fields) + + def _update_relation_data(self, relation: Relation, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. 
+ data: dict containing the key-value pairs + that should be updated in the relation. + """ + return self._update_relation_data_without_secrets(self.local_app, relation, data) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Deletes a set of fields from the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. + fields: list containing the field names that should be removed from the relation. + """ + return self._delete_relation_data_without_secrets(self.local_app, relation, fields) + + # Public functions -- inherited + + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) + + +class RequirerEventHandlers(EventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + + self.framework.observe( + self.charm.on[relation_data.relation_name].relation_created, + self._on_relation_created_event, + ) + self.framework.observe( + charm.on.secret_changed, + self._on_secret_changed_event, + ) + + # Event handlers + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the relation is created.""" + if not self.relation_data.local_unit.is_leader(): + return + + if self.relation_data.secret_fields: # pyright: ignore [reportAttributeAccessIssue] + set_encoded_field( + event.relation, + self.relation_data.component, + REQ_SECRET_FIELDS, + self.relation_data.secret_fields, # pyright: ignore [reportAttributeAccessIssue] + ) + + @abstractmethod + def _on_secret_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + 
+################################################################################ +# Peer Relation Data +################################################################################ + + +class DataPeerData(RequirerData, ProviderData): + """Represents peer relations data.""" + + SECRET_FIELDS = [] + SECRET_FIELD_NAME = "internal_secret" + SECRET_LABEL_MAP = {} + + def __init__( + self, + model, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + ): + RequirerData.__init__( + self, + model, + relation_name, + extra_user_roles, + additional_secret_fields, + ) + self.secret_field_name = secret_field_name if secret_field_name else self.SECRET_FIELD_NAME + self.deleted_label = deleted_label + self._secret_label_map = {} + + # Legacy information holders + self._legacy_labels = [] + self._legacy_secret_uri = None + + # Secrets that are being dynamically added within the scope of this event handler run + self._new_secrets = [] + self._additional_secret_group_mapping = additional_secret_group_mapping + + for group, fields in additional_secret_group_mapping.items(): + if group not in SECRET_GROUPS.groups(): + setattr(SECRET_GROUPS, group, group) + for field in fields: + secret_group = SECRET_GROUPS.get_group(group) + internal_field = self._field_to_internal_name(field, secret_group) + self._secret_label_map.setdefault(group, []).append(internal_field) + self._secret_fields.append(internal_field) + + @property + def scope(self) -> Optional[Scope]: + """Turn component information into Scope.""" + if isinstance(self.component, Application): + return Scope.APP + if isinstance(self.component, Unit): + return Scope.UNIT + + @property + def secret_label_map(self) -> Dict[str, str]: + """Property storing secret mappings.""" + return self._secret_label_map + + @property + def 
static_secret_fields(self) -> List[str]: + """Re-definition of the property in a way that dynamically extended list is retrieved.""" + return self._secret_fields + + @property + def secret_fields(self) -> List[str]: + """Re-definition of the property in a way that dynamically extended list is retrieved.""" + return ( + self.static_secret_fields if self.static_secret_fields else self.current_secret_fields + ) + + @property + def current_secret_fields(self) -> List[str]: + """Helper method to get all currently existing secret fields (added statically or dynamically).""" + if not self.secrets_enabled: + return [] + + if len(self._model.relations[self.relation_name]) > 1: + raise ValueError(f"More than one peer relation on {self.relation_name}") + + relation = self._model.relations[self.relation_name][0] + fields = [] + + ignores = [SECRET_GROUPS.get_group("user"), SECRET_GROUPS.get_group("tls")] + for group in SECRET_GROUPS.groups(): + if group in ignores: + continue + if content := self._get_group_secret_contents(relation, group): + fields += list(content.keys()) + return list(set(fields) | set(self._new_secrets)) + + @dynamic_secrets_only + def set_secret( + self, + relation_id: int, + field: str, + value: str, + group_mapping: Optional[SecretGroup] = None, + ) -> None: + """Public interface method to add a Relation Data field specifically as a Juju Secret. 
+ + Args: + relation_id: ID of the relation + field: The secret field that is to be added + value: The string value of the secret + group_mapping: The name of the "secret group", in case the field is to be added to an existing secret + """ + self._legacy_apply_on_update([field]) + + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + self._new_secrets.append(full_field) + if self.valid_field_pattern(field, full_field): + self.update_relation_data(relation_id, {full_field: value}) + + # Unlike for set_secret(), there's no harm using this operation with static secrets + # The restriction is only added to keep the concept clear + @dynamic_secrets_only + def get_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to fetch secrets only.""" + self._legacy_apply_on_fetch() + + full_field = self._field_to_internal_name(field, group_mapping) + if ( + self.secrets_enabled + and full_field not in self.current_secret_fields + and field not in self.current_secret_fields + ): + return + if self.valid_field_pattern(field, full_field): + return self.fetch_my_relation_field(relation_id, full_field) + + @dynamic_secrets_only + def delete_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to delete secrets only.""" + self._legacy_apply_on_delete([field]) + + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + logger.warning(f"Secret {field} from group {group_mapping} was not found") + return + + if self.valid_field_pattern(field, full_field): + self.delete_relation_data(relation_id, [full_field]) + + ########################################################################## + # Helpers + 
########################################################################## + + @staticmethod + def _field_to_internal_name(field: str, group: Optional[SecretGroup]) -> str: + if not group or group == SECRET_GROUPS.EXTRA: + return field + return f"{field}{GROUP_SEPARATOR}{group}" + + @staticmethod + def _internal_name_to_field(name: str) -> Tuple[str, SecretGroup]: + parts = name.split(GROUP_SEPARATOR) + if not len(parts) > 1: + return (parts[0], SECRET_GROUPS.EXTRA) + secret_group = SECRET_GROUPS.get_group(parts[1]) + if not secret_group: + raise ValueError(f"Invalid secret field {name}") + return (parts[0], secret_group) + + def _group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. + + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! + """ + secret_fieldnames_grouped = {} + for key in secret_fields: + field, group = self._internal_name_to_field(key) + secret_fieldnames_grouped.setdefault(group, []).append(field) + return secret_fieldnames_grouped + + def _content_for_secret_group( + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + ) -> Dict[str, str]: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: + return {k: v for k, v in content.items() if k in self.secret_fields} + return { + self._internal_name_to_field(k)[0]: v + for k, v in content.items() + if k in self.secret_fields + } + + def valid_field_pattern(self, field: str, full_field: str) -> bool: + """Check that no secret group is attempted to be used together without secrets being enabled. + + Secrets groups are impossible to use with versions that are not yet supporting secrets. + """ + if not self.secrets_enabled and full_field != field: + logger.error( + f"Can't access {full_field}: no secrets available (i.e. no secret groups either)." 
+ ) + return False + return True + + ########################################################################## + # Backwards compatibility / Upgrades + ########################################################################## + # These functions are used to keep backwards compatibility on upgrades + # Policy: + # All data is kept intact until the first write operation. (This allows a minimal + # grace period during which rollbacks are fully safe. For more info see spec.) + # All data involves: + # - databag + # - secrets content + # - secret labels (!!!) + # Legacy functions must return None, and leave an equally consistent state whether + # they are executed or skipped (as a high enough versioned execution environment may + # not require so) + + # Full legacy stack for each operation + + def _legacy_apply_on_fetch(self) -> None: + """All legacy functions to be applied on fetch.""" + relation = self._model.relations[self.relation_name][0] + self._legacy_compat_generate_prev_labels() + self._legacy_compat_secret_uri_from_databag(relation) + + def _legacy_apply_on_update(self, fields) -> None: + """All legacy functions to be applied on update.""" + relation = self._model.relations[self.relation_name][0] + self._legacy_compat_generate_prev_labels() + self._legacy_compat_secret_uri_from_databag(relation) + self._legacy_migration_remove_secret_from_databag(relation, fields) + self._legacy_migration_remove_secret_field_name_from_databag(relation) + + def _legacy_apply_on_delete(self, fields) -> None: + """All legacy functions to be applied on delete.""" + relation = self._model.relations[self.relation_name][0] + self._legacy_compat_generate_prev_labels() + self._legacy_compat_secret_uri_from_databag(relation) + self._legacy_compat_check_deleted_label(relation, fields) + + # Compatibility + + @legacy_apply_from_version(18) + def _legacy_compat_check_deleted_label(self, relation, fields) -> None: + """Helper function for legacy behavior. 
+ + As long as https://bugs.launchpad.net/juju/+bug/2028094 wasn't fixed, + we did not delete fields but rather kept them in the secret with a string value + expressing invalidity. This function is maintaining that behavior when needed. + """ + if not self.deleted_label: + return + + current_data = self.fetch_my_relation_data([relation.id], fields) + if current_data is not None: + # Check if the secret we wanna delete actually exists + # Given the "deleted label", here we can't rely on the default mechanism (i.e. 'key not found') + if non_existent := (set(fields) & set(self.secret_fields)) - set( + current_data.get(relation.id, []) + ): + logger.debug( + "Non-existing secret %s was attempted to be removed.", + ", ".join(non_existent), + ) + + @legacy_apply_from_version(18) + def _legacy_compat_secret_uri_from_databag(self, relation) -> None: + """Fetching the secret URI from the databag, in case stored there.""" + self._legacy_secret_uri = relation.data[self.component].get( + self._generate_secret_field_name(), None + ) + + @legacy_apply_from_version(34) + def _legacy_compat_generate_prev_labels(self) -> None: + """Generator for legacy secret label names, for backwards compatibility. + + Secret label is part of the data that MUST be maintained across rolling upgrades. + In case there may be a change on a secret label, the old label must be recognized + after upgrades, and left intact until the first write operation -- when we roll over + to the new label. + + This function keeps "memory" of previously used secret labels. + NOTE: Return value takes decorator into account -- all 'legacy' functions may return `None` + + v0.34 (rev69): Fixing issue https://github.com/canonical/data-platform-libs/issues/155 + meant moving from '.' (i.e. 'mysql.app', 'mysql.unit') + to labels '..' 
(like 'peer.mysql.app') + """ + if self._legacy_labels: + return + + result = [] + members = [self._model.app.name] + if self.scope: + members.append(self.scope.value) + result.append(f"{'.'.join(members)}") + self._legacy_labels = result + + # Migration + + @legacy_apply_from_version(18) + def _legacy_migration_remove_secret_from_databag(self, relation, fields: List[str]) -> None: + """For Rolling Upgrades -- when moving from databag to secrets usage. + + Practically what happens here is to remove stuff from the databag that is + to be stored in secrets. + """ + if not self.secret_fields: + return + + secret_fields_passed = set(self.secret_fields) & set(fields) + for field in secret_fields_passed: + if self._fetch_relation_data_without_secrets(self.component, relation, [field]): + self._delete_relation_data_without_secrets(self.component, relation, [field]) + + @legacy_apply_from_version(18) + def _legacy_migration_remove_secret_field_name_from_databag(self, relation) -> None: + """Making sure that the old databag URI is gone. + + This action should not be executed more than once. + + There was a phase (before moving secrets usage to libs) when charms saved the peer + secret URI to the databag, and used this URI from then on to retrieve their secret. + When upgrading to charm versions using this library, we need to add a label to the + secret and access it via label from then on, and remove the old traces from the databag. + """ + # Nothing to do if 'internal-secret' is not in the databag + if not (relation.data[self.component].get(self._generate_secret_field_name())): + return + + # Making sure that the secret receives its label + # (This should have happened by the time we get here, rather an extra security measure.) 
+ secret = self._get_relation_secret(relation.id) + + # Either app scope secret with leader executing, or unit scope secret + leader_or_unit_scope = self.component != self.local_app or self.local_unit.is_leader() + if secret and leader_or_unit_scope: + # Databag reference to the secret URI can be removed, now that it's labelled + relation.data[self.component].pop(self._generate_secret_field_name(), None) + + ########################################################################## + # Event handlers + ########################################################################## + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + pass + + def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: + """Event emitted when the secret has changed.""" + pass + + ########################################################################## + # Overrides of Relation Data handling functions + ########################################################################## + + def _generate_secret_label( + self, relation_name: str, relation_id: int, group_mapping: SecretGroup + ) -> str: + members = [relation_name, self._model.app.name] + if self.scope: + members.append(self.scope.value) + if group_mapping != SECRET_GROUPS.EXTRA: + members.append(group_mapping) + return f"{'.'.join(members)}" + + def _generate_secret_field_name(self, group_mapping: SecretGroup = SECRET_GROUPS.EXTRA) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{self.secret_field_name}" + + @juju_secrets_only + def _get_relation_secret( + self, + relation_id: int, + group_mapping: SecretGroup = SECRET_GROUPS.EXTRA, + relation_name: Optional[str] = None, + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret specifically for peer relations. 
+ + In case this code may be executed within a rolling upgrade, and we may need to + migrate secrets from the databag to labels, we make sure to stick the correct + label on the secret, and clean up the local databag. + """ + if not relation_name: + relation_name = self.relation_name + + relation = self._model.get_relation(relation_name, relation_id) + if not relation: + return + + label = self._generate_secret_label(relation_name, relation_id, group_mapping) + + # URI or legacy label is only to be applied when moving single legacy secret to a (new) label + if group_mapping == SECRET_GROUPS.EXTRA: + # Fetching the secret with fallback to URI (in case label is not yet known) + # Label would be "stuck" on the secret in case it is found + return self.secrets.get( + label, self._legacy_secret_uri, legacy_labels=self._legacy_labels + ) + return self.secrets.get(label) + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Union[Set[str], List[str]] = [], + ) -> Dict[str, str]: + """Helper function to retrieve collective, requested contents of a secret.""" + secret_fields = [self._internal_name_to_field(k)[0] for k in secret_fields] + result = super()._get_group_secret_contents(relation, group, secret_fields) + if self.deleted_label: + result = {key: result[key] for key in result if result[key] != self.deleted_label} + if self._additional_secret_group_mapping: + return {self._field_to_internal_name(key, group): result[key] for key in result} + return result + + @either_static_or_dynamic_secrets + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directly or indirectly -- i.e. 
secrets) from the relation for owner/this_app.""" + return self._fetch_relation_data_with_secrets( + self.component, self.secret_fields, relation, fields + ) + + @either_static_or_dynamic_secrets + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Update data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + _, normal_fields = self._process_secret_fields( + relation, + self.secret_fields, + list(data), + self._add_or_update_relation_secrets, + data=data, + uri_to_databag=False, + ) + + normal_content = {k: v for k, v in data.items() if k in normal_fields} + self._update_relation_data_without_secrets(self.component, relation, normal_content) + + @either_static_or_dynamic_secrets + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + if self.secret_fields and self.deleted_label: + + _, normal_fields = self._process_secret_fields( + relation, + self.secret_fields, + fields, + self._update_relation_secret, + data={field: self.deleted_label for field in fields}, + ) + else: + _, normal_fields = self._process_secret_fields( + relation, self.secret_fields, fields, self._delete_relation_secret, fields=fields + ) + self._delete_relation_data_without_secrets(self.component, relation, list(normal_fields)) + + def fetch_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Dict[int, Dict[str, str]]: + """This method makes no sense for a Peer Relation.""" + raise NotImplementedError( + "Peer Relation only supports 'self-side' fetch methods: " + "fetch_my_relation_data() and fetch_my_relation_field()" + ) + + def fetch_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """This method makes no sense for a 
Peer Relation.""" + raise NotImplementedError( + "Peer Relation only supports 'self-side' fetch methods: " + "fetch_my_relation_data() and fetch_my_relation_field()" + ) + + ########################################################################## + # Public functions -- inherited + ########################################################################## + + fetch_my_relation_data = Data.fetch_my_relation_data + fetch_my_relation_field = Data.fetch_my_relation_field + + +class DataPeerEventHandlers(RequirerEventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + pass + + def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: + """Event emitted when the secret has changed.""" + pass + + +class DataPeer(DataPeerData, DataPeerEventHandlers): + """Represents peer relations.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerEventHandlers.__init__(self, charm, self, unique_key) + + +class DataPeerUnitData(DataPeerData): + """Unit data abstraction representation.""" + + SCOPE = Scope.UNIT + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + +class DataPeerUnit(DataPeerUnitData, DataPeerEventHandlers): + """Unit databag representation.""" + + def 
__init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerEventHandlers.__init__(self, charm, self, unique_key) + + +class DataPeerOtherUnitData(DataPeerUnitData): + """Unit data abstraction representation.""" + + def __init__(self, unit: Unit, *args, **kwargs): + super().__init__(*args, **kwargs) + self.local_unit = unit + self.component = unit + + def update_relation_data(self, relation_id: int, data: dict) -> None: + """This method makes no sense for a Other Peer Relation.""" + raise NotImplementedError("It's not possible to update data of another unit.") + + def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: + """This method makes no sense for a Other Peer Relation.""" + raise NotImplementedError("It's not possible to delete data of another unit.") + + +class DataPeerOtherUnitEventHandlers(DataPeerEventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: DataPeerUnitData): + """Manager of base client relations.""" + unique_key = f"{relation_data.relation_name}-{relation_data.local_unit.name}" + super().__init__(charm, relation_data, unique_key=unique_key) + + +class DataPeerOtherUnit(DataPeerOtherUnitData, DataPeerOtherUnitEventHandlers): + """Unit databag representation for another unit than the executor.""" + + def __init__( + self, + unit: Unit, + charm: CharmBase, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, 
+ + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + ): + DataPeerOtherUnitData.__init__( + self, + unit, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerOtherUnitEventHandlers.__init__(self, charm, self) + + +################################################################################ +# Cross-charm Relations Data Handling and Events +################################################################################ + +# Generic events + + +class ExtraRoleEvent(RelationEvent): + """Base class for data events.""" + + @property + def extra_user_roles(self) -> Optional[str]: + """Returns the extra user roles that were requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("extra-user-roles") + + +class RelationEventWithSecret(RelationEvent): + """Base class for Relation Events that need to handle secrets.""" + + @property + def _secrets(self) -> dict: + """Caching secrets to avoid fetching them each time a field is referred. 
+ + DON'T USE the encapsulated helper variable outside of this function + """ + if not hasattr(self, "_cached_secrets"): + self._cached_secrets = {} + return self._cached_secrets + + def _get_secret(self, group) -> Optional[Dict[str, str]]: + """Retrieving secrets.""" + if not self.app: + return + if not self._secrets.get(group): + self._secrets[group] = None + secret_field = f"{PROV_SECRET_PREFIX}{group}" + if secret_uri := self.relation.data[self.app].get(secret_field): + secret = self.framework.model.get_secret(id=secret_uri) + self._secrets[group] = secret.get_content() + return self._secrets[group] + + @property + def secrets_enabled(self): + """Is this Juju version allowing for Secrets usage?""" + return JujuVersion.from_environ().has_secrets + + +class AuthenticationEvent(RelationEventWithSecret): + """Base class for authentication fields for events. + + The amount of logic added here is not ideal -- but this was the only way to preserve + the interface when moving to Juju Secrets + """ + + @property + def username(self) -> Optional[str]: + """Returns the created username.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("username") + + return self.relation.data[self.relation.app].get("username") + + @property + def password(self) -> Optional[str]: + """Returns the password for the created user.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("password") + + return self.relation.data[self.relation.app].get("password") + + @property + def tls(self) -> Optional[str]: + """Returns whether TLS is configured.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls") + + return self.relation.data[self.relation.app].get("tls") + + @property + def tls_ca(self) -> Optional[str]: + """Returns 
TLS CA.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls-ca") + + return self.relation.data[self.relation.app].get("tls-ca") + + +# Database related events and fields + + +class DatabaseProvidesEvent(RelationEvent): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("database") + + +class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent): + """Event emitted when a new database is requested for use on this relation.""" + + @property + def external_node_connectivity(self) -> bool: + """Returns the requested external_node_connectivity field.""" + if not self.relation.app: + return False + + return ( + self.relation.data[self.relation.app].get("external-node-connectivity", "false") + == "true" + ) + + +class DatabaseProvidesEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_requested = EventSource(DatabaseRequestedEvent) + + +class DatabaseRequiresEvent(RelationEventWithSecret): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database name.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("database") + + @property + def endpoints(self) -> Optional[str]: + """Returns a comma separated list of read/write endpoints. + + In VM charms, this is the primary's address. + In kubernetes charms, this is the service to the primary pod. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("endpoints") + + @property + def read_only_endpoints(self) -> Optional[str]: + """Returns a comma separated list of read only endpoints. 
+ + In VM charms, this is the address of all the secondary instances. + In kubernetes charms, this is the service to all replica pod instances. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("read-only-endpoints") + + @property + def replset(self) -> Optional[str]: + """Returns the replicaset name. + + MongoDB only. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("replset") + + @property + def uris(self) -> Optional[str]: + """Returns the connection URIs. + + MongoDB, Redis, OpenSearch. + """ + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("uris") + + return self.relation.data[self.relation.app].get("uris") + + @property + def version(self) -> Optional[str]: + """Returns the version of the database. + + Version as informed by the database daemon. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("version") + + +class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when a new database is created for use on this relation.""" + + +class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read/write endpoints are changed.""" + + +class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read only endpoints are changed.""" + + +class DatabaseRequiresEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. 
+ """ + + database_created = EventSource(DatabaseCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent) + + +# Database Provider and Requires + + +class DatabaseProviderData(ProviderData): + """Provider-side data of the database relations.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_database(self, relation_id: int, database_name: str) -> None: + """Set database name. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + database_name: database name. + """ + self.update_relation_data(relation_id, {"database": database_name}) + + def set_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database primary connections. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + In VM charms, only the primary's address should be passed as an endpoint. + In kubernetes charms, the service endpoint to the primary pod should be + passed as an endpoint. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. + """ + self.update_relation_data(relation_id, {"endpoints": connection_strings}) + + def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database replicas connection strings. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. 
+ """ + self.update_relation_data(relation_id, {"read-only-endpoints": connection_strings}) + + def set_replset(self, relation_id: int, replset: str) -> None: + """Set replica set name in the application relation databag. + + MongoDB only. + + Args: + relation_id: the identifier for a particular relation. + replset: replica set name. + """ + self.update_relation_data(relation_id, {"replset": replset}) + + def set_uris(self, relation_id: int, uris: str) -> None: + """Set the database connection URIs in the application relation databag. + + MongoDB, Redis, and OpenSearch only. + + Args: + relation_id: the identifier for a particular relation. + uris: connection URIs. + """ + self.update_relation_data(relation_id, {"uris": uris}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the database version in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + version: database version. + """ + self.update_relation_data(relation_id, {"version": version}) + + def set_subordinated(self, relation_id: int) -> None: + """Raises the subordinated flag in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. 
+ """ + self.update_relation_data(relation_id, {"subordinated": "true"}) + + +class DatabaseProviderEventHandlers(EventHandlers): + """Provider-side of the database relation handlers.""" + + on = DatabaseProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, charm: CharmBase, relation_data: DatabaseProviderData, unique_key: str = "" + ): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + # Just to calm down pyright, it can't parse that the same type is being used in the super() call above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a database requested event if the setup key (database name and optional + # extra user roles) was added to the relation databag by the application. 
+ if "database" in diff.added: + getattr(self.on, "database_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class DatabaseProvides(DatabaseProviderData, DatabaseProviderEventHandlers): + """Provider-side of the database relations.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + DatabaseProviderData.__init__(self, charm.model, relation_name) + DatabaseProviderEventHandlers.__init__(self, charm, self) + + +class DatabaseRequirerData(RequirerData): + """Requirer-side of the database relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + database_name: str, + extra_user_roles: Optional[str] = None, + relations_aliases: Optional[List[str]] = None, + additional_secret_fields: Optional[List[str]] = [], + external_node_connectivity: bool = False, + ): + """Manager of database client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.database = database_name + self.relations_aliases = relations_aliases + self.external_node_connectivity = external_node_connectivity + + def is_postgresql_plugin_enabled(self, plugin: str, relation_index: int = 0) -> bool: + """Returns whether a plugin is enabled in the database. + + Args: + plugin: name of the plugin to check. + relation_index: optional relation index to check the database + (default: 0 - first relation). + + PostgreSQL only. + """ + # Psycopg 3 is imported locally to avoid the need of its package installation + # when relating to a database charm other than PostgreSQL. + import psycopg + + # Return False if no relation is established. + if len(self.relations) == 0: + return False + + relation_id = self.relations[relation_index].id + host = self.fetch_relation_field(relation_id, "endpoints") + + # Return False if there is no endpoint available. 
class DatabaseRequirerEventHandlers(RequirerEventHandlers):
    """Requires-side of the relation."""

    on = DatabaseRequiresEvents()  # pyright: ignore [reportAssignmentType]

    def __init__(
        self, charm: CharmBase, relation_data: DatabaseRequirerData, unique_key: str = ""
    ):
        """Manager of base client relations."""
        super().__init__(charm, relation_data, unique_key)
        # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above
        self.relation_data = relation_data

        # Define custom event names for each alias.
        if self.relation_data.relations_aliases:
            # Ensure the number of aliases does not exceed the maximum
            # of connections allowed in the specific relation.
            relation_connection_limit = self.charm.meta.requires[
                self.relation_data.relation_name
            ].limit
            if len(self.relation_data.relations_aliases) != relation_connection_limit:
                raise ValueError(
                    f"The number of aliases must match the maximum number of connections allowed in the relation. "
                    f"Expected {relation_connection_limit}, got {len(self.relation_data.relations_aliases)}"
                )

        if self.relation_data.relations_aliases:
            for relation_alias in self.relation_data.relations_aliases:
                self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent)
                self.on.define_event(
                    f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent
                )
                self.on.define_event(
                    f"{relation_alias}_read_only_endpoints_changed",
                    DatabaseReadOnlyEndpointsChangedEvent,
                )

    def _on_secret_changed_event(self, event: SecretChangedEvent):
        """Event notifying about a new value of a secret."""
        pass

    def _assign_relation_alias(self, relation_id: int) -> None:
        """Assigns an alias to a relation.

        This function writes in the unit data bag.

        Args:
            relation_id: the identifier for a particular relation.
        """
        # If no aliases were provided, return immediately.
        if not self.relation_data.relations_aliases:
            return

        # Return if an alias was already assigned to this relation
        # (like when there are more than one unit joining the relation).
        relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id)
        if relation and relation.data[self.relation_data.local_unit].get("alias"):
            return

        # Retrieve the available aliases (the ones that weren't assigned to any relation).
        available_aliases = self.relation_data.relations_aliases[:]
        for relation in self.charm.model.relations[self.relation_data.relation_name]:
            alias = relation.data[self.relation_data.local_unit].get("alias")
            if alias:
                logger.debug("Alias %s was already assigned to relation %d", alias, relation.id)
                available_aliases.remove(alias)

        # Set the alias in the unit relation databag of the specific relation.
        # NOTE(review): assumes at least one alias is still free here; the
        # __init__ check ties the alias count to the relation limit, which is
        # what makes available_aliases[0] safe — confirm when changing either.
        relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id)
        if relation:
            relation.data[self.relation_data.local_unit].update({"alias": available_aliases[0]})

        # We need to set relation alias also on the application level so,
        # it will be accessible in show-unit juju command, executed for a consumer application unit
        if self.relation_data.local_unit.is_leader():
            self.relation_data.update_relation_data(relation_id, {"alias": available_aliases[0]})

    def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None:
        """Emit an aliased event to a particular relation if it has an alias.

        Args:
            event: the relation changed event that was received.
            event_name: the name of the event to emit.
        """
        alias = self._get_relation_alias(event.relation.id)
        if alias:
            getattr(self.on, f"{alias}_{event_name}").emit(
                event.relation, app=event.app, unit=event.unit
            )

    def _get_relation_alias(self, relation_id: int) -> Optional[str]:
        """Returns the relation alias.

        Args:
            relation_id: the identifier for a particular relation.

        Returns:
            the relation alias or None if the relation was not found.
        """
        for relation in self.charm.model.relations[self.relation_data.relation_name]:
            if relation.id == relation_id:
                return relation.data[self.relation_data.local_unit].get("alias")
        return None

    def _on_relation_created_event(self, event: RelationCreatedEvent) -> None:
        """Event emitted when the database relation is created."""
        super()._on_relation_created_event(event)

        # If relations aliases were provided, assign one to the relation.
        self._assign_relation_alias(event.relation.id)

        # Sets both database and extra user roles in the relation
        # if the roles are provided. Otherwise, sets only the database.
        if not self.relation_data.local_unit.is_leader():
            return

        event_data = {"database": self.relation_data.database}

        if self.relation_data.extra_user_roles:
            event_data["extra-user-roles"] = self.relation_data.extra_user_roles

        # set external-node-connectivity field
        if self.relation_data.external_node_connectivity:
            event_data["external-node-connectivity"] = "true"

        self.relation_data.update_relation_data(event.relation.id, event_data)

    def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
        """Event emitted when the database relation has changed."""
        # Detect whether the provider is a subordinate charm and collect the
        # remote unit databag for the readiness check below.
        is_subordinate = False
        remote_unit_data = None
        for key in event.relation.data.keys():
            if isinstance(key, Unit) and not key.name.startswith(self.charm.app.name):
                remote_unit_data = event.relation.data[key]
            elif isinstance(key, Application) and key.name != self.charm.app.name:
                is_subordinate = event.relation.data[key].get("subordinated") == "true"

        if is_subordinate:
            # Wait until the subordinate provider declares itself ready.
            if not remote_unit_data:
                return

            if remote_unit_data.get("state") != "ready":
                return

        # Check which data has changed to emit customs events.
        diff = self._diff(event)

        # Register all new secrets with their labels
        if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)):
            self.relation_data._register_secrets_to_relation(event.relation, diff.added)

        # Check if the database is created
        # (the database charm shared the credentials).
        secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER)
        if (
            "username" in diff.added and "password" in diff.added
        ) or secret_field_user in diff.added:
            # Emit the default event (the one without an alias).
            logger.info("database created at %s", datetime.now())
            getattr(self.on, "database_created").emit(
                event.relation, app=event.app, unit=event.unit
            )

            # Emit the aliased event (if any).
            self._emit_aliased_event(event, "database_created")

            # To avoid unnecessary application restarts do not trigger
            # “endpoints_changed“ event if “database_created“ is triggered.
            return

        # Emit an endpoints changed event if the database
        # added or changed this info in the relation databag.
        if "endpoints" in diff.added or "endpoints" in diff.changed:
            # Emit the default event (the one without an alias).
            logger.info("endpoints changed on %s", datetime.now())
            getattr(self.on, "endpoints_changed").emit(
                event.relation, app=event.app, unit=event.unit
            )

            # Emit the aliased event (if any).
            self._emit_aliased_event(event, "endpoints_changed")

            # To avoid unnecessary application restarts do not trigger
            # “read_only_endpoints_changed“ event if “endpoints_changed“ is triggered.
            return

        # Emit a read only endpoints changed event if the database
        # added or changed this info in the relation databag.
        if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed:
            # Emit the default event (the one without an alias).
            logger.info("read-only-endpoints changed on %s", datetime.now())
            getattr(self.on, "read_only_endpoints_changed").emit(
                event.relation, app=event.app, unit=event.unit
            )

            # Emit the aliased event (if any).
            self._emit_aliased_event(event, "read_only_endpoints_changed")
+ self._emit_aliased_event(event, "read_only_endpoints_changed") + + +class DatabaseRequires(DatabaseRequirerData, DatabaseRequirerEventHandlers): + """Provider-side of the database relations.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + database_name: str, + extra_user_roles: Optional[str] = None, + relations_aliases: Optional[List[str]] = None, + additional_secret_fields: Optional[List[str]] = [], + external_node_connectivity: bool = False, + ): + DatabaseRequirerData.__init__( + self, + charm.model, + relation_name, + database_name, + extra_user_roles, + relations_aliases, + additional_secret_fields, + external_node_connectivity, + ) + DatabaseRequirerEventHandlers.__init__(self, charm, self) + + +################################################################################ +# Charm-specific Relations Data and Events +################################################################################ + +# Kafka Events + + +class KafkaProvidesEvent(RelationEvent): + """Base class for Kafka events.""" + + @property + def topic(self) -> Optional[str]: + """Returns the topic that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("topic") + + @property + def consumer_group_prefix(self) -> Optional[str]: + """Returns the consumer-group-prefix that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("consumer-group-prefix") + + +class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent): + """Event emitted when a new topic is requested for use on this relation.""" + + +class KafkaProvidesEvents(CharmEvents): + """Kafka events. + + This class defines the events that the Kafka can emit. 
+ """ + + topic_requested = EventSource(TopicRequestedEvent) + + +class KafkaRequiresEvent(RelationEvent): + """Base class for Kafka events.""" + + @property + def topic(self) -> Optional[str]: + """Returns the topic.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("topic") + + @property + def bootstrap_server(self) -> Optional[str]: + """Returns a comma-separated list of broker uris.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("endpoints") + + @property + def consumer_group_prefix(self) -> Optional[str]: + """Returns the consumer-group-prefix.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("consumer-group-prefix") + + @property + def zookeeper_uris(self) -> Optional[str]: + """Returns a comma separated list of Zookeeper uris.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("zookeeper-uris") + + +class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent): + """Event emitted when a new topic is created for use on this relation.""" + + +class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent): + """Event emitted when the bootstrap server is changed.""" + + +class KafkaRequiresEvents(CharmEvents): + """Kafka events. + + This class defines the events that the Kafka can emit. + """ + + topic_created = EventSource(TopicCreatedEvent) + bootstrap_server_changed = EventSource(BootstrapServerChangedEvent) + + +# Kafka Provides and Requires + + +class KafkaProviderData(ProviderData): + """Provider-side of the Kafka relation.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_topic(self, relation_id: int, topic: str) -> None: + """Set topic name in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + topic: the topic name. 
+ """ + self.update_relation_data(relation_id, {"topic": topic}) + + def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None: + """Set the bootstrap server in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + bootstrap_server: the bootstrap server address. + """ + self.update_relation_data(relation_id, {"endpoints": bootstrap_server}) + + def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None: + """Set the consumer group prefix in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + consumer_group_prefix: the consumer group prefix string. + """ + self.update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix}) + + def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None: + """Set the zookeeper uris in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + zookeeper_uris: comma-separated list of ZooKeeper server uris. + """ + self.update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris}) + + +class KafkaProviderEventHandlers(EventHandlers): + """Provider-side of the Kafka relation.""" + + on = KafkaProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: KafkaProviderData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + + # Check which data has changed to emit customs events. 
class KafkaProvides(KafkaProviderData, KafkaProviderEventHandlers):
    """Provider-side of the Kafka relation."""

    def __init__(self, charm: CharmBase, relation_name: str) -> None:
        KafkaProviderData.__init__(self, charm.model, relation_name)
        KafkaProviderEventHandlers.__init__(self, charm, self)


class KafkaRequirerData(RequirerData):
    """Requirer-side of the Kafka relation.

    Args:
        model: the charm model.
        relation_name: name of the relation endpoint.
        topic: topic to request (wildcard "*" is rejected).
        extra_user_roles: optional comma-separated extra roles for the user.
        consumer_group_prefix: optional consumer group prefix.
        additional_secret_fields: extra fields to treat as secrets.
    """

    def __init__(
        self,
        model: Model,
        relation_name: str,
        topic: str,
        extra_user_roles: Optional[str] = None,
        consumer_group_prefix: Optional[str] = None,
        additional_secret_fields: Optional[List[str]] = None,
    ):
        """Manager of Kafka client relations."""
        # FIX: the default was a shared mutable list (`= []`); default to None
        # and pass a per-call empty list instead (same behaviour, no shared state).
        super().__init__(
            model,
            relation_name,
            extra_user_roles,
            additional_secret_fields if additional_secret_fields is not None else [],
        )
        self.topic = topic
        self.consumer_group_prefix = consumer_group_prefix or ""

    @property
    def topic(self):
        """Topic to use in Kafka."""
        return self._topic

    @topic.setter
    def topic(self, value):
        # Avoid wildcards
        if value == "*":
            raise ValueError(f"Error on topic '{value}', cannot be a wildcard.")
        self._topic = value


class KafkaRequirerEventHandlers(RequirerEventHandlers):
    """Requires-side of the Kafka relation."""

    on = KafkaRequiresEvents()  # pyright: ignore [reportAssignmentType]

    def __init__(self, charm: CharmBase, relation_data: KafkaRequirerData) -> None:
        super().__init__(charm, relation_data)
        # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above
        self.relation_data = relation_data

    def _on_relation_created_event(self, event: RelationCreatedEvent) -> None:
        """Event emitted when the Kafka relation is created."""
        super()._on_relation_created_event(event)

        if not self.relation_data.local_unit.is_leader():
            return

        # Sets topic, extra user roles, and "consumer-group-prefix" in the relation
        relation_data = {"topic": self.relation_data.topic}

        if self.relation_data.extra_user_roles:
            relation_data["extra-user-roles"] = self.relation_data.extra_user_roles

        if self.relation_data.consumer_group_prefix:
            relation_data["consumer-group-prefix"] = self.relation_data.consumer_group_prefix

        self.relation_data.update_relation_data(event.relation.id, relation_data)

    def _on_secret_changed_event(self, event: SecretChangedEvent):
        """Event notifying about a new value of a secret."""
        pass

    def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
        """Event emitted when the Kafka relation has changed."""
        # Check which data has changed to emit customs events.
        diff = self._diff(event)

        # Check if the topic is created
        # (the Kafka charm shared the credentials).

        # Register all new secrets with their labels
        if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)):
            self.relation_data._register_secrets_to_relation(event.relation, diff.added)

        secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER)
        if (
            "username" in diff.added and "password" in diff.added
        ) or secret_field_user in diff.added:
            # Emit the default event (the one without an alias).
            logger.info("topic created at %s", datetime.now())
            getattr(self.on, "topic_created").emit(event.relation, app=event.app, unit=event.unit)

            # To avoid unnecessary application restarts do not trigger
            # “endpoints_changed“ event if “topic_created“ is triggered.
            return

        # Emit an endpoints (bootstrap-server) changed event if the Kafka endpoints
        # added or changed this info in the relation databag.
        if "endpoints" in diff.added or "endpoints" in diff.changed:
            # Emit the default event (the one without an alias).
            logger.info("endpoints changed on %s", datetime.now())
            getattr(self.on, "bootstrap_server_changed").emit(
                event.relation, app=event.app, unit=event.unit
            )  # here check if this is the right design
            return
+ return + + # Emit an endpoints (bootstrap-server) changed event if the Kafka endpoints + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + getattr(self.on, "bootstrap_server_changed").emit( + event.relation, app=event.app, unit=event.unit + ) # here check if this is the right design + return + + +class KafkaRequires(KafkaRequirerData, KafkaRequirerEventHandlers): + """Provider-side of the Kafka relation.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + topic: str, + extra_user_roles: Optional[str] = None, + consumer_group_prefix: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ) -> None: + KafkaRequirerData.__init__( + self, + charm.model, + relation_name, + topic, + extra_user_roles, + consumer_group_prefix, + additional_secret_fields, + ) + KafkaRequirerEventHandlers.__init__(self, charm, self) + + +# Opensearch related events + + +class OpenSearchProvidesEvent(RelationEvent): + """Base class for OpenSearch events.""" + + @property + def index(self) -> Optional[str]: + """Returns the index that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("index") + + +class IndexRequestedEvent(OpenSearchProvidesEvent, ExtraRoleEvent): + """Event emitted when a new index is requested for use on this relation.""" + + +class OpenSearchProvidesEvents(CharmEvents): + """OpenSearch events. + + This class defines the events that OpenSearch can emit. 
+ """ + + index_requested = EventSource(IndexRequestedEvent) + + +class OpenSearchRequiresEvent(DatabaseRequiresEvent): + """Base class for OpenSearch requirer events.""" + + +class IndexCreatedEvent(AuthenticationEvent, OpenSearchRequiresEvent): + """Event emitted when a new index is created for use on this relation.""" + + +class OpenSearchRequiresEvents(CharmEvents): + """OpenSearch events. + + This class defines the events that the opensearch requirer can emit. + """ + + index_created = EventSource(IndexCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + authentication_updated = EventSource(AuthenticationEvent) + + +# OpenSearch Provides and Requires Objects + + +class OpenSearchProvidesData(ProviderData): + """Provider-side of the OpenSearch relation.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_index(self, relation_id: int, index: str) -> None: + """Set the index in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + index: the index as it is _created_ on the provider charm. This needn't match the + requested index, and can be used to present a different index name if, for example, + the requested index is invalid. + """ + self.update_relation_data(relation_id, {"index": index}) + + def set_endpoints(self, relation_id: int, endpoints: str) -> None: + """Set the endpoints in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + endpoints: the endpoint addresses for opensearch nodes. + """ + self.update_relation_data(relation_id, {"endpoints": endpoints}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the opensearch version in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + version: database version. 
+ """ + self.update_relation_data(relation_id, {"version": version}) + + +class OpenSearchProvidesEventHandlers(EventHandlers): + """Provider-side of the OpenSearch relation.""" + + on = OpenSearchProvidesEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchProvidesData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit an index requested event if the setup key (index name and optional extra user roles) + # have been added to the relation databag by the application. + if "index" in diff.added: + getattr(self.on, "index_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class OpenSearchProvides(OpenSearchProvidesData, OpenSearchProvidesEventHandlers): + """Provider-side of the OpenSearch relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + OpenSearchProvidesData.__init__(self, charm.model, relation_name) + OpenSearchProvidesEventHandlers.__init__(self, charm, self) + + +class OpenSearchRequiresData(RequirerData): + """Requires data side of the OpenSearch relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of OpenSearch client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.index = index + + +class OpenSearchRequiresEventHandlers(RequirerEventHandlers): + """Requires events side of the OpenSearch relation.""" 
+ + on = OpenSearchRequiresEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchRequiresData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the OpenSearch relation is created.""" + super()._on_relation_created_event(event) + + if not self.relation_data.local_unit.is_leader(): + return + + # Sets both index and extra user roles in the relation if the roles are provided. + # Otherwise, sets only the index. + data = {"index": self.relation_data.index} + if self.relation_data.extra_user_roles: + data["extra-user-roles"] = self.relation_data.extra_user_roles + + self.relation_data.update_relation_data(event.relation.id, data) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + if not event.secret.label: + return + + relation = self.relation_data._relation_from_secret_label(event.secret.label) + if not relation: + logging.info( + f"Received secret {event.secret.label} but couldn't parse, seems irrelevant" + ) + return + + if relation.app == self.charm.app: + logging.info("Secret changed event ignored for Secret Owner") + + remote_unit = None + for unit in relation.units: + if unit.app != self.charm.app: + remote_unit = unit + + logger.info("authentication updated") + getattr(self.on, "authentication_updated").emit( + relation, app=relation.app, unit=remote_unit + ) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the OpenSearch relation has changed. + + This event triggers individual custom events depending on the changing relation. + """ + # Check which data has changed to emit customs events. 
+ diff = self._diff(event) + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) + + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + secret_field_tls = self.relation_data._generate_secret_field_name(SECRET_GROUPS.TLS) + updates = {"username", "password", "tls", "tls-ca", secret_field_user, secret_field_tls} + if len(set(diff._asdict().keys()) - updates) < len(diff): + logger.info("authentication updated at: %s", datetime.now()) + getattr(self.on, "authentication_updated").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Check if the index is created + # (the OpenSearch charm shares the credentials). + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("index created at: %s", datetime.now()) + getattr(self.on, "index_created").emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “index_created“ is triggered. + return + + # Emit a endpoints changed event if the OpenSearch application added or changed this info + # in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). 
+ logger.info("endpoints changed on %s", datetime.now()) + getattr(self.on, "endpoints_changed").emit( + event.relation, app=event.app, unit=event.unit + ) # here check if this is the right design + return + + +class OpenSearchRequires(OpenSearchRequiresData, OpenSearchRequiresEventHandlers): + """Requires-side of the OpenSearch relation.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ) -> None: + OpenSearchRequiresData.__init__( + self, + charm.model, + relation_name, + index, + extra_user_roles, + additional_secret_fields, + ) + OpenSearchRequiresEventHandlers.__init__(self, charm, self) diff --git a/lib/charms/mongodb/v0/config_server_interface.py b/lib/charms/mongodb/v0/config_server_interface.py new file mode 100644 index 00000000..ba515f3e --- /dev/null +++ b/lib/charms/mongodb/v0/config_server_interface.py @@ -0,0 +1,494 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""In this class, we manage relations between config-servers and shards. + +This class handles the sharing of secrets between sharded components, adding shards, and removing +shards. 
+""" +import logging +from typing import Optional + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseProvides, + DatabaseRequires, +) +from charms.mongodb.v1.mongos import MongosConnection +from ops.charm import CharmBase, EventBase, RelationBrokenEvent +from ops.framework import Object +from ops.model import ( + ActiveStatus, + BlockedStatus, + MaintenanceStatus, + StatusBase, + WaitingStatus, +) + +from config import Config + +logger = logging.getLogger(__name__) +KEYFILE_KEY = "key-file" +KEY_FILE = "keyFile" +HOSTS_KEY = "host" +CONFIG_SERVER_DB_KEY = "config-server-db" +MONGOS_SOCKET_URI_FMT = "%2Fvar%2Fsnap%2Fcharmed-mongodb%2Fcommon%2Fvar%2Fmongodb-27018.sock" +INT_TLS_CA_KEY = f"int-{Config.TLS.SECRET_CA_LABEL}" + +# The unique Charmhub library identifier, never change it +LIBID = "58ad1ccca4974932ba22b97781b9b2a0" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 9 + + +class ClusterProvider(Object): + """Manage relations between the config server and mongos router on the config-server side.""" + + def __init__( + self, charm: CharmBase, relation_name: str = Config.Relations.CLUSTER_RELATIONS_NAME + ) -> None: + """Constructor for ShardingProvider object.""" + self.relation_name = relation_name + self.charm = charm + self.database_provides = DatabaseProvides(self.charm, relation_name=self.relation_name) + + super().__init__(charm, self.relation_name) + self.framework.observe( + charm.on[self.relation_name].relation_changed, self._on_relation_changed + ) + + self.framework.observe( + charm.on[self.relation_name].relation_departed, + self.charm.check_relation_broken_or_scale_down, + ) + self.framework.observe( + charm.on[self.relation_name].relation_broken, self._on_relation_broken + ) + + def pass_hook_checks(self, event: EventBase) -> bool: + """Runs the 
pre-hooks checks for ClusterProvider, returns True if all pass.""" + if not self.charm.db_initialised: + logger.info("Deferring %s. db is not initialised.", type(event)) + event.defer() + return False + + if not self.is_valid_mongos_integration(): + logger.info( + "Skipping %s. ClusterProvider is only be executed by config-server", type(event) + ) + return False + + if not self.charm.unit.is_leader(): + return False + + if self.charm.upgrade_in_progress: + logger.warning( + "Processing mongos applications is not supported during an upgrade. The charm may be in a broken, unrecoverable state." + ) + event.defer() + return False + + return True + + def is_valid_mongos_integration(self) -> bool: + """Returns true if the integration to mongos is valid.""" + is_integrated_to_mongos = len( + self.charm.model.relations[Config.Relations.CLUSTER_RELATIONS_NAME] + ) + + if not self.charm.is_role(Config.Role.CONFIG_SERVER) and is_integrated_to_mongos: + return False + + return True + + def _on_relation_changed(self, event) -> None: + """Handles providing mongos with KeyFile and hosts.""" + if not self.pass_hook_checks(event): + if not self.is_valid_mongos_integration(): + self.charm.status.set_and_share_status( + BlockedStatus( + "Relation to mongos not supported, config role must be config-server" + ) + ) + logger.info("Skipping relation joined event: hook checks did not pass") + return + + config_server_db = self.generate_config_server_db() + + # create user and set secrets for mongos relation + self.charm.client_relations.oversee_users(None, None) + + relation_data = { + KEYFILE_KEY: self.charm.get_secret( + Config.Relations.APP_SCOPE, Config.Secrets.SECRET_KEYFILE_NAME + ), + CONFIG_SERVER_DB_KEY: config_server_db, + } + + # if tls enabled + int_tls_ca = self.charm.tls.get_tls_secret( + internal=True, label_name=Config.TLS.SECRET_CA_LABEL + ) + if int_tls_ca: + relation_data[INT_TLS_CA_KEY] = int_tls_ca + + self.database_provides.update_relation_data(event.relation.id, 
relation_data) + + def _on_relation_broken(self, event) -> None: + if self.charm.upgrade_in_progress: + logger.warning( + "Removing integration to mongos is not supported during an upgrade. The charm may be in a broken, unrecoverable state." + ) + + # Only relation_deparated events can check if scaling down + departed_relation_id = event.relation.id + if not self.charm.has_departed_run(departed_relation_id): + logger.info( + "Deferring, must wait for relation departed hook to decide if relation should be removed." + ) + event.defer() + return + + if not self.pass_hook_checks(event): + logger.info("Skipping relation broken event: hook checks did not pass") + return + + if not self.charm.proceed_on_broken_event(event): + logger.info("Skipping relation broken event, broken event due to scale down") + return + + self.charm.client_relations.oversee_users(departed_relation_id, event) + + def update_config_server_db(self, event): + """Provides related mongos applications with new config server db.""" + if not self.pass_hook_checks(event): + logger.info("Skipping update_config_server_db: hook checks did not pass") + return + + config_server_db = self.generate_config_server_db() + + if not self.charm.unit.is_leader(): + return + + for relation in self.charm.model.relations[self.relation_name]: + self.database_provides.update_relation_data( + relation.id, + { + CONFIG_SERVER_DB_KEY: config_server_db, + }, + ) + + def generate_config_server_db(self) -> str: + """Generates the config server database for mongos to connect to.""" + replica_set_name = self.charm.app.name + hosts = [] + for host in self.charm.app_hosts: + hosts.append(f"{host}:{Config.MONGODB_PORT}") + + hosts = ",".join(hosts) + return f"{replica_set_name}/{hosts}" + + def update_ca_secret(self, new_ca: str) -> None: + """Updates the new CA for all related shards.""" + for relation in self.charm.model.relations[self.relation_name]: + if new_ca is None: + self.database_provides.delete_relation_data(relation.id, 
    def __init__(
        self,
        charm: CharmBase,
        relation_name: str = Config.Relations.CLUSTER_RELATIONS_NAME,
        substrate: str = Config.Substrate.VM,
    ) -> None:
        """Constructor for ClusterRequirer object.

        Args:
            charm: the (mongos) charm for which this relation manager is instantiated.
            relation_name: name of the cluster relation to manage.
            substrate: the deployment substrate; controls how connection info is
                shared with clients (VM vs K8s behave differently).
        """
        self.substrate = substrate
        self.relation_name = relation_name
        self.charm = charm
        # Requirer side of the data-platform interface; the extra secret fields
        # carry the cluster keyfile and the internal CA cert from the config-server.
        self.database_requires = DatabaseRequires(
            self.charm,
            relation_name=self.relation_name,
            relations_aliases=[self.relation_name],
            database_name=self.charm.database,
            extra_user_roles=self.charm.extra_user_roles,
            additional_secret_fields=[KEYFILE_KEY, INT_TLS_CA_KEY],
        )

        super().__init__(charm, self.relation_name)
        self.framework.observe(
            charm.on[self.relation_name].relation_created,
            self.database_requires._on_relation_created_event,
        )

        self.framework.observe(
            self.database_requires.on.database_created, self._on_database_created
        )
        self.framework.observe(
            charm.on[self.relation_name].relation_changed, self._on_relation_changed
        )
        # relation-departed records scale-down vs removal before relation-broken fires.
        self.framework.observe(
            charm.on[self.relation_name].relation_departed,
            self.charm.check_relation_broken_or_scale_down,
        )
        self.framework.observe(
            charm.on[self.relation_name].relation_broken, self._on_relation_broken
        )
+ ) + event.defer() + return + + if not self.charm.unit.is_leader(): + return + + logger.info("Database and user created for mongos application") + self.charm.set_secret(Config.Relations.APP_SCOPE, Config.Secrets.USERNAME, event.username) + self.charm.set_secret(Config.Relations.APP_SCOPE, Config.Secrets.PASSWORD, event.password) + + # K8s charm have a 1:Many client scheme and share connection info in a different manner. + if self.substrate == Config.Substrate.VM: + self.charm.share_connection_info() + + def _on_relation_changed(self, event) -> None: + """Starts/restarts monogs with config server information.""" + if not self.pass_hook_checks(event): + logger.info("pre-hook checks did not pass, not executing event") + return + + key_file_contents = self.database_requires.fetch_relation_field( + event.relation.id, KEYFILE_KEY + ) + config_server_db_uri = self.database_requires.fetch_relation_field( + event.relation.id, CONFIG_SERVER_DB_KEY + ) + if not key_file_contents or not config_server_db_uri: + self.charm.status.set_and_share_status( + WaitingStatus("Waiting for secrets from config-server") + ) + return + + updated_keyfile = self.update_keyfile(key_file_contents=key_file_contents) + updated_config = self.update_config_server_db(config_server_db=config_server_db_uri) + + # avoid restarting mongos when possible + if not updated_keyfile and not updated_config and self.is_mongos_running(): + return + + # mongos is not available until it is using new secrets + logger.info("Restarting mongos with new secrets") + self.charm.status.set_and_share_status(MaintenanceStatus("starting mongos")) + self.charm.restart_charm_services() + + # restart on high loaded databases can be very slow (e.g. up to 10-20 minutes). 
+ if not self.is_mongos_running(): + logger.info("mongos has not started, deferring") + self.charm.status.set_and_share_status(WaitingStatus("Waiting for mongos to start")) + event.defer() + return + + self.charm.status.set_and_share_status(ActiveStatus()) + self.charm.mongos_intialised = True + + def _on_relation_broken(self, event: RelationBrokenEvent) -> None: + # Only relation_deparated events can check if scaling down + if not self.charm.has_departed_run(event.relation.id): + logger.info( + "Deferring, must wait for relation departed hook to decide if relation should be removed." + ) + event.defer() + return + + if not self.charm.proceed_on_broken_event(event): + logger.info("Skipping relation broken event, broken event due to scale down") + return + + self.charm.stop_mongos_service() + logger.info("Stopped mongos daemon") + + if not self.charm.unit.is_leader(): + return + + logger.info("Database and user removed for mongos application") + self.charm.remove_secret(Config.Relations.APP_SCOPE, Config.Secrets.USERNAME) + self.charm.remove_secret(Config.Relations.APP_SCOPE, Config.Secrets.PASSWORD) + + # K8s charm have a 1:Many client scheme and share connection info in a different manner. + if self.substrate == Config.Substrate.VM: + self.charm.remove_connection_info() + + # BEGIN: helper functions + def pass_hook_checks(self, event): + """Runs the pre-hooks checks for ClusterRequirer, returns True if all pass.""" + if self.is_mongos_tls_missing(): + logger.info( + "Deferring %s. Config-server uses TLS, but mongos does not. Please synchronise encryption methods.", + str(type(event)), + ) + event.defer() + return False + + if self.is_config_server_tls_missing(): + logger.info( + "Deferring %s. mongos uses TLS, but config-server does not. Please synchronise encryption methods.", + str(type(event)), + ) + event.defer() + return False + + if not self.is_ca_compatible(): + logger.info( + "Deferring %s. mongos is integrated to a different CA than the config server. 
Please use the same CA for all cluster components.", + str(type(event)), + ) + + event.defer() + return False + + if self.charm.upgrade_in_progress: + logger.warning( + "Processing client applications is not supported during an upgrade. The charm may be in a broken, unrecoverable state." + ) + event.defer() + return False + + return True + + def is_mongos_running(self) -> bool: + """Returns true if mongos service is running.""" + connection_uri = f"mongodb://{self.charm.get_mongos_host()}" + + # use the mongos port for k8s charms and external connections on VM + if self.charm.is_external_client or self.substrate == Config.K8S_SUBSTRATE: + connection_uri = connection_uri + f":{Config.MONGOS_PORT}" + + with MongosConnection(None, connection_uri) as mongo: + return mongo.is_ready + + def update_config_server_db(self, config_server_db) -> bool: + """Updates config server str when necessary.""" + if self.charm.config_server_db == config_server_db: + return False + + if self.substrate == Config.Substrate.VM: + self.charm.update_mongos_args(config_server_db) + + return True + + def update_keyfile(self, key_file_contents: str) -> bool: + """Updates keyfile when necessary.""" + # keyfile is set by leader in application data, application data does not necessarily + # match what is on the machine. 
+ current_key_file = self.charm.get_keyfile_contents() + if not key_file_contents or key_file_contents == current_key_file: + return False + + # put keyfile on the machine with appropriate permissions + self.charm.push_file_to_unit( + parent_dir=Config.MONGOD_CONF_DIR, file_name=KEY_FILE, file_contents=key_file_contents + ) + + if self.charm.unit.is_leader(): + self.charm.set_secret( + Config.Relations.APP_SCOPE, Config.Secrets.SECRET_KEYFILE_NAME, key_file_contents + ) + + return True + + def get_tls_statuses(self) -> Optional[StatusBase]: + """Returns statuses relevant to TLS.""" + if self.is_mongos_tls_missing(): + return BlockedStatus("mongos requires TLS to be enabled.") + + if self.is_config_server_tls_missing(): + return BlockedStatus("mongos has TLS enabled, but config-server does not.") + + if not self.is_ca_compatible(): + logger.error( + "mongos is integrated to a different CA than the config server. Please use the same CA for all cluster components." + ) + return BlockedStatus("mongos CA and Config-Server CA don't match.") + + return None + + def get_config_server_name(self) -> Optional[str]: + """Returns the name of the Juju Application that mongos is using as a config server.""" + if not self.model.get_relation(self.relation_name): + return None + + # metadata.yaml prevents having multiple config servers + return self.model.get_relation(self.relation_name).app.name + + def get_config_server_uri(self) -> str: + """Returns the short form URI of the config server.""" + return self.database_requires.fetch_relation_field( + self.model.get_relation(Config.Relations.CLUSTER_RELATIONS_NAME).id, + CONFIG_SERVER_DB_KEY, + ) + + def is_ca_compatible(self) -> bool: + """Returns true if both the mongos and the config server use the same CA.""" + config_server_relation = self.charm.model.get_relation(self.relation_name) + # base-case: nothing to compare + if not config_server_relation: + return True + + config_server_tls_ca = 
self.database_requires.fetch_relation_field( + config_server_relation.id, INT_TLS_CA_KEY + ) + + mongos_tls_ca = self.charm.tls.get_tls_secret( + internal=True, label_name=Config.TLS.SECRET_CA_LABEL + ) + + # base-case: missing one or more CA's to compare + if not config_server_tls_ca and not mongos_tls_ca: + return True + + return config_server_tls_ca == mongos_tls_ca + + def is_mongos_tls_missing(self) -> bool: + """Returns true if the config-server has TLS enabled but mongos does not.""" + config_server_relation = self.charm.model.get_relation(self.relation_name) + if not config_server_relation: + return False + + mongos_has_tls = self.charm.model.get_relation(Config.TLS.TLS_PEER_RELATION) is not None + config_server_has_tls = ( + self.database_requires.fetch_relation_field(config_server_relation.id, INT_TLS_CA_KEY) + is not None + ) + if config_server_has_tls and not mongos_has_tls: + return True + + return False + + def is_config_server_tls_missing(self) -> bool: + """Returns true if the mongos has TLS enabled but the config-server does not.""" + config_server_relation = self.charm.model.get_relation(self.relation_name) + if not config_server_relation: + return False + + mongos_has_tls = self.charm.model.get_relation(Config.TLS.TLS_PEER_RELATION) is not None + config_server_has_tls = ( + self.database_requires.fetch_relation_field(config_server_relation.id, INT_TLS_CA_KEY) + is not None + ) + if not config_server_has_tls and mongos_has_tls: + return True + + return False + + # END: helper functions diff --git a/lib/charms/mongodb/v0/mongodb_tls.py b/lib/charms/mongodb/v0/mongodb_tls.py new file mode 100644 index 00000000..6669c1c3 --- /dev/null +++ b/lib/charms/mongodb/v0/mongodb_tls.py @@ -0,0 +1,365 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""In this class we manage client database relations. 
+ +This class creates user and database for each application relation +and expose needed information for client connection via fields in +external relation. +""" +import base64 +import logging +import re +import socket +from typing import List, Optional, Tuple + +from charms.tls_certificates_interface.v3.tls_certificates import ( + CertificateAvailableEvent, + CertificateExpiringEvent, + TLSCertificatesRequiresV3, + generate_csr, + generate_private_key, +) +from ops.charm import ActionEvent, RelationBrokenEvent, RelationJoinedEvent +from ops.framework import Object +from ops.model import ActiveStatus, MaintenanceStatus, Unit, WaitingStatus + +from config import Config + +UNIT_SCOPE = Config.Relations.UNIT_SCOPE +Scopes = Config.Relations.Scopes + + +# The unique Charmhub library identifier, never change it +LIBID = "e02a50f0795e4dd292f58e93b4f493dd" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 15 + +logger = logging.getLogger(__name__) + + +class MongoDBTLS(Object): + """In this class we manage client database relations.""" + + def __init__(self, charm, peer_relation, substrate): + """Manager of MongoDB client relations.""" + super().__init__(charm, "client-relations") + self.charm = charm + self.substrate = substrate + self.peer_relation = peer_relation + self.certs = TLSCertificatesRequiresV3(self.charm, Config.TLS.TLS_PEER_RELATION) + self.framework.observe( + self.charm.on.set_tls_private_key_action, self._on_set_tls_private_key + ) + self.framework.observe( + self.charm.on[Config.TLS.TLS_PEER_RELATION].relation_joined, + self._on_tls_relation_joined, + ) + self.framework.observe( + self.charm.on[Config.TLS.TLS_PEER_RELATION].relation_broken, + self._on_tls_relation_broken, + ) + self.framework.observe(self.certs.on.certificate_available, self._on_certificate_available) + 
self.framework.observe(self.certs.on.certificate_expiring, self._on_certificate_expiring) + + def is_tls_enabled(self, internal: bool): + """Returns a boolean indicating if TLS for a given internal/external is enabled.""" + return self.get_tls_secret(internal, Config.TLS.SECRET_CERT_LABEL) is not None + + def _on_set_tls_private_key(self, event: ActionEvent) -> None: + """Set the TLS private key, which will be used for requesting the certificate.""" + logger.debug("Request to set TLS private key received.") + if self.charm.is_role(Config.Role.MONGOS) and not self.charm.has_config_server(): + logger.error( + "mongos is not running (not integrated to config-server) deferring renewal of certificates." + ) + event.fail("Mongos cannot set TLS keys until integrated to config-server.") + return + + if self.charm.upgrade_in_progress: + logger.warning("Setting TLS key during an upgrade is not supported.") + event.fail("Setting TLS key during an upgrade is not supported.") + return + + try: + self.request_certificate(event.params.get("external-key", None), internal=False) + self.request_certificate(event.params.get("internal-key", None), internal=True) + logger.debug("Successfully set TLS private key.") + except ValueError as e: + event.fail(str(e)) + + def request_certificate( + self, + param: Optional[str], + internal: bool, + ): + """Request TLS certificate.""" + if param is None: + key = generate_private_key() + else: + key = self._parse_tls_file(param) + + csr = generate_csr( + private_key=key, + subject=self._get_subject_name(), + organization=self._get_subject_name(), + sans=self._get_sans(), + sans_ip=[str(self.charm.model.get_binding(self.peer_relation).network.bind_address)], + ) + self.set_tls_secret(internal, Config.TLS.SECRET_KEY_LABEL, key.decode("utf-8")) + self.set_tls_secret(internal, Config.TLS.SECRET_CSR_LABEL, csr.decode("utf-8")) + self.set_tls_secret(internal, Config.TLS.SECRET_CERT_LABEL, None) + + label = "int" if internal else "ext" + 
self.charm.unit_peer_data[f"{label}_certs_subject"] = self._get_subject_name() + self.charm.unit_peer_data[f"{label}_certs_subject"] = self._get_subject_name() + + if self.charm.model.get_relation(Config.TLS.TLS_PEER_RELATION): + self.certs.request_certificate_creation(certificate_signing_request=csr) + + @staticmethod + def _parse_tls_file(raw_content: str) -> bytes: + """Parse TLS files from both plain text or base64 format.""" + if re.match(r"(-+(BEGIN|END) [A-Z ]+-+)", raw_content): + return ( + re.sub( + r"(-+(BEGIN|END) [A-Z ]+-+)", + "\\1", + raw_content, + ) + .rstrip() + .encode("utf-8") + ) + return base64.b64decode(raw_content) + + def _on_tls_relation_joined(self, event: RelationJoinedEvent) -> None: + """Request certificate when TLS relation joined.""" + if self.charm.is_role(Config.Role.MONGOS) and not self.charm.has_config_server(): + logger.info( + "mongos is not running (not integrated to config-server) deferring renewal of certificates." + ) + event.defer() + return + + if self.charm.upgrade_in_progress: + logger.warning( + "Enabling TLS is not supported during an upgrade. The charm may be in a broken, unrecoverable state." + ) + event.defer() + return + + self.request_certificate(None, internal=True) + self.request_certificate(None, internal=False) + + def _on_tls_relation_broken(self, event: RelationBrokenEvent) -> None: + """Disable TLS when TLS relation broken.""" + logger.debug("Disabling external and internal TLS for unit: %s", self.charm.unit.name) + if self.charm.upgrade_in_progress: + logger.warning( + "Disabling TLS is not supported during an upgrade. The charm may be in a broken, unrecoverable state." 
+ ) + + for internal in [True, False]: + self.set_tls_secret(internal, Config.TLS.SECRET_CA_LABEL, None) + self.set_tls_secret(internal, Config.TLS.SECRET_CERT_LABEL, None) + self.set_tls_secret(internal, Config.TLS.SECRET_CHAIN_LABEL, None) + + if self.charm.is_role(Config.Role.CONFIG_SERVER): + self.charm.cluster.update_ca_secret(new_ca=None) + self.charm.config_server.update_ca_secret(new_ca=None) + + logger.info("Restarting mongod with TLS disabled.") + self.charm.status.set_and_share_status(MaintenanceStatus("disabling TLS")) + self.charm.delete_tls_certificate_from_workload() + self.charm.restart_charm_services() + self.charm.status.set_and_share_status(ActiveStatus()) + + def _on_certificate_available(self, event: CertificateAvailableEvent) -> None: + """Enable TLS when TLS certificate available.""" + if self.charm.is_role(Config.Role.MONGOS) and not self.charm.config_server_db: + logger.debug( + "mongos requires config-server in order to start, do not restart with TLS until integrated to config-server" + ) + event.defer() + return + + int_csr = self.get_tls_secret(internal=True, label_name=Config.TLS.SECRET_CSR_LABEL) + ext_csr = self.get_tls_secret(internal=False, label_name=Config.TLS.SECRET_CSR_LABEL) + + if ext_csr and event.certificate_signing_request.rstrip() == ext_csr.rstrip(): + logger.debug("The external TLS certificate available.") + internal = False + elif int_csr and event.certificate_signing_request.rstrip() == int_csr.rstrip(): + logger.debug("The internal TLS certificate available.") + internal = True + else: + logger.error("An unknown certificate is available -- ignoring.") + return + + self.set_tls_secret( + internal, + Config.TLS.SECRET_CHAIN_LABEL, + "\n".join(event.chain) if event.chain is not None else None, + ) + self.set_tls_secret(internal, Config.TLS.SECRET_CERT_LABEL, event.certificate) + self.set_tls_secret(internal, Config.TLS.SECRET_CA_LABEL, event.ca) + + if self.charm.is_role(Config.Role.CONFIG_SERVER) and internal: + 
    def waiting_for_certs(self) -> bool:
        """Returns a boolean indicating whether additional certs are needed.

        Both the internal and the external certificate must be stored before
        mongod is restarted once with TLS enabled; True means at least one
        certificate is still missing.
        """
        if not self.get_tls_secret(internal=True, label_name=Config.TLS.SECRET_CERT_LABEL):
            logger.debug("Waiting for internal certificate.")
            return True
        if not self.get_tls_secret(internal=False, label_name=Config.TLS.SECRET_CERT_LABEL):
            logger.debug("Waiting for external certificate.")
            return True

        return False
+ ) + event.defer() + return + + if ( + event.certificate.rstrip() + == self.get_tls_secret( + internal=False, label_name=Config.TLS.SECRET_CERT_LABEL + ).rstrip() + ): + logger.debug("The external TLS certificate expiring.") + internal = False + elif ( + event.certificate.rstrip() + == self.get_tls_secret(internal=True, label_name=Config.TLS.SECRET_CERT_LABEL).rstrip() + ): + logger.debug("The internal TLS certificate expiring.") + + internal = True + else: + logger.error("An unknown certificate expiring.") + return + + logger.debug("Generating a new Certificate Signing Request.") + key = self.get_tls_secret(internal, Config.TLS.SECRET_KEY_LABEL).encode("utf-8") + old_csr = self.get_tls_secret(internal, Config.TLS.SECRET_CSR_LABEL).encode("utf-8") + new_csr = generate_csr( + private_key=key, + subject=self._get_subject_name(), + organization=self._get_subject_name(), + sans=self._get_sans(), + sans_ip=[str(self.charm.model.get_binding(self.peer_relation).network.bind_address)], + ) + logger.debug("Requesting a certificate renewal.") + + self.certs.request_certificate_renewal( + old_certificate_signing_request=old_csr, + new_certificate_signing_request=new_csr, + ) + + self.set_tls_secret(internal, Config.TLS.SECRET_CSR_LABEL, new_csr.decode("utf-8")) + + def _get_sans(self) -> List[str]: + """Create a list of DNS names for a MongoDB unit. + + Returns: + A list representing the hostnames of the MongoDB unit. + """ + unit_id = self.charm.unit.name.split("/")[1] + return [ + f"{self.charm.app.name}-{unit_id}", + socket.getfqdn(), + f"{self.charm.app.name}-{unit_id}.{self.charm.app.name}-endpoints", + str(self.charm.model.get_binding(self.peer_relation).network.bind_address), + "localhost", + ] + + def get_tls_files(self, internal: bool) -> Tuple[Optional[str], Optional[str]]: + """Prepare TLS files in special MongoDB way. + + MongoDB needs two files: + — CA file should have a full chain. + — PEM file should have private key and certificate without certificate chain. 
+ """ + scope = "internal" if internal else "external" + if not self.is_tls_enabled(internal): + logging.debug(f"TLS disabled for {scope}") + return None, None + logging.debug(f"TLS *enabled* for {scope}, fetching data for CA and PEM files ") + + ca = self.get_tls_secret(internal, Config.TLS.SECRET_CA_LABEL) + chain = self.get_tls_secret(internal, Config.TLS.SECRET_CHAIN_LABEL) + ca_file = chain if chain else ca + + key = self.get_tls_secret(internal, Config.TLS.SECRET_KEY_LABEL) + cert = self.get_tls_secret(internal, Config.TLS.SECRET_CERT_LABEL) + pem_file = key + if cert: + pem_file = key + "\n" + cert if key else cert + + return ca_file, pem_file + + def get_host(self, unit: Unit): + """Retrieves the hostname of the unit based on the substrate.""" + if self.substrate == "vm": + return self.charm.unit_ip(unit) + else: + return self.charm.get_hostname_for_unit(unit) + + def set_tls_secret(self, internal: bool, label_name: str, contents: str) -> None: + """Sets TLS secret, based on whether or not it is related to internal connections.""" + scope = "int" if internal else "ext" + label_name = f"{scope}-{label_name}" + self.charm.set_secret(UNIT_SCOPE, label_name, contents) + + def get_tls_secret(self, internal: bool, label_name: str) -> str: + """Gets TLS secret, based on whether or not it is related to internal connections.""" + scope = "int" if internal else "ext" + label_name = f"{scope}-{label_name}" + return self.charm.get_secret(UNIT_SCOPE, label_name) + + def _get_subject_name(self) -> str: + """Generate the subject name for CSR.""" + # In sharded MongoDB deployments it is a requirement that all subject names match across + # all cluster components. The config-server name is the source of truth across mongos and + # shard deployments. 
+ if not self.charm.is_role(Config.Role.CONFIG_SERVER): + # until integrated with config-server use current app name as + # subject name + return self.charm.get_config_server_name() or self.charm.app.name + + return self.charm.app.name diff --git a/lib/charms/mongodb/v1/helpers.py b/lib/charms/mongodb/v1/helpers.py new file mode 100644 index 00000000..2f6222ab --- /dev/null +++ b/lib/charms/mongodb/v1/helpers.py @@ -0,0 +1,322 @@ +"""Simple functions, which can be used in both K8s and VM charms.""" + +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. +import json +import logging +import os +import secrets +import string +import subprocess +from typing import List + +from charms.mongodb.v1.mongodb import MongoDBConfiguration +from ops.model import ActiveStatus, MaintenanceStatus, StatusBase, WaitingStatus + +from config import Config + +# The unique Charmhub library identifier, never change it +LIBID = "b9a7fe0c38d8486a9d1ce94c27d4758e" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 6 + +# path to store mongodb ketFile +KEY_FILE = "keyFile" +TLS_EXT_PEM_FILE = "external-cert.pem" +TLS_EXT_CA_FILE = "external-ca.crt" +TLS_INT_PEM_FILE = "internal-cert.pem" +TLS_INT_CA_FILE = "internal-ca.crt" + +MONGODB_COMMON_DIR = "/var/snap/charmed-mongodb/common" +MONGODB_SNAP_DATA_DIR = "/var/snap/charmed-mongodb/current" + +MONGO_SHELL = "charmed-mongodb.mongosh" + +DATA_DIR = "/var/lib/mongodb" +LOG_DIR = "/var/log/mongodb" +CONF_DIR = "/etc/mongod" +MONGODB_LOG_FILENAME = "mongodb.log" +logger = logging.getLogger(__name__) + + +def _get_logging_options(snap_install: bool) -> str: + """Returns config option for log path. 
def _get_audit_log_settings(snap_install: bool) -> List[str]:
    """Return config options for audit log.

    :param snap_install: indicate that charmed-mongodb was installed from snap (VM charms)
    :return: a list of audit log settings for charmed MongoDB
    """
    # snap installs keep all writable state under the snap common directory
    prefix = MONGODB_COMMON_DIR if snap_install else ""
    audit_log_path = f"{prefix}{LOG_DIR}/{Config.AuditLog.FILE_NAME}"
    return [
        f"--auditDestination={Config.AuditLog.DESTINATION}",
        f"--auditFormat={Config.AuditLog.FORMAT}",
        f"--auditPath={audit_log_path}",
    ]
+ It is needed to install mongodb-clients inside charm container to make + this function work correctly + """ + return [ + mongo_path, + "mongodb://localhost/admin", + "--quiet", + "--eval", + "db.createUser({" + f" user: '{config.username}'," + " pwd: passwordPrompt()," + " roles:[" + " {'role': 'userAdminAnyDatabase', 'db': 'admin'}, " + " {'role': 'readWriteAnyDatabase', 'db': 'admin'}, " + " {'role': 'clusterAdmin', 'db': 'admin'}, " + " ]," + " mechanisms: ['SCRAM-SHA-256']," + " passwordDigestor: 'server'," + "})", + ] + + +def get_mongos_args( + config, + snap_install: bool = False, + config_server_db: str = None, + external_connectivity: bool = True, +) -> str: + """Returns the arguments used for starting mongos on a config-server side application. + + Returns: + A string representing the arguments to be passed to mongos. + """ + # suborinate charm which provides its own config_server_db, should only use unix domain socket + binding_ips = ( + "--bind_ip_all" + if external_connectivity + else f"--bind_ip {MONGODB_COMMON_DIR}/var/mongodb-27018.sock" + ) + + # mongos running on the config server communicates through localhost + config_server_db = config_server_db or f"{config.replset}/localhost:{Config.MONGODB_PORT}" + + full_conf_dir = f"{MONGODB_SNAP_DATA_DIR}{CONF_DIR}" if snap_install else CONF_DIR + cmd = [ + # mongos on config server side should run on 0.0.0.0 so it can be accessed by other units + # in the sharded cluster + binding_ips, + f"--configdb {config_server_db}", + # config server is already using 27017 + f"--port {Config.MONGOS_PORT}", + "--logRotate reopen", + "--logappend", + ] + + # TODO : generalise these into functions to be re-used + if config.tls_external: + cmd.extend( + [ + f"--tlsCAFile={full_conf_dir}/{TLS_EXT_CA_FILE}", + f"--tlsCertificateKeyFile={full_conf_dir}/{TLS_EXT_PEM_FILE}", + # allow non-TLS connections + "--tlsMode=preferTLS", + "--tlsDisabledProtocols=TLS1_0,TLS1_1", + ] + ) + + # internal TLS can be enabled only if 
external is enabled + if config.tls_internal and config.tls_external: + cmd.extend( + [ + "--clusterAuthMode=x509", + "--tlsAllowInvalidCertificates", + f"--tlsClusterCAFile={full_conf_dir}/{TLS_INT_CA_FILE}", + f"--tlsClusterFile={full_conf_dir}/{TLS_INT_PEM_FILE}", + ] + ) + else: + # keyFile used for authentication between replica set peers if no internal tls configured. + cmd.extend( + [ + "--clusterAuthMode=keyFile", + f"--keyFile={full_conf_dir}/{KEY_FILE}", + ] + ) + + cmd.append("\n") + return " ".join(cmd) + + +def get_mongod_args( + config: MongoDBConfiguration, + auth: bool = True, + snap_install: bool = False, + role: str = "replication", +) -> str: + """Construct the MongoDB startup command line. + + Returns: + A string representing the command used to start MongoDB. + """ + full_data_dir = f"{MONGODB_COMMON_DIR}{DATA_DIR}" if snap_install else DATA_DIR + full_conf_dir = f"{MONGODB_SNAP_DATA_DIR}{CONF_DIR}" if snap_install else CONF_DIR + logging_options = _get_logging_options(snap_install) + audit_log_settings = _get_audit_log_settings(snap_install) + cmd = [ + # bind to localhost and external interfaces + "--bind_ip_all", + # part of replicaset + f"--replSet={config.replset}", + # db must be located within the snap common directory since the snap is strictly confined + f"--dbpath={full_data_dir}", + # for simplicity we run the mongod daemon on shards, configsvrs, and replicas on the same + # port + f"--port={Config.MONGODB_PORT}", + "--setParameter processUmask=037", # required for log files permission (g+r) + "--logRotate reopen", + "--logappend", + logging_options, + ] + cmd.extend(audit_log_settings) + if auth: + cmd.extend(["--auth"]) + + if auth and not config.tls_internal: + # keyFile cannot be used without auth and cannot be used in tandem with internal TLS + cmd.extend( + [ + "--clusterAuthMode=keyFile", + f"--keyFile={full_conf_dir}/{KEY_FILE}", + ] + ) + + if config.tls_external: + cmd.extend( + [ + 
            f"--tlsCAFile={full_conf_dir}/{TLS_EXT_CA_FILE}", + f"--tlsCertificateKeyFile={full_conf_dir}/{TLS_EXT_PEM_FILE}", + # allow non-TLS connections + "--tlsMode=preferTLS", + "--tlsDisabledProtocols=TLS1_0,TLS1_1", + ] + ) + + # internal TLS can be enabled only if external is enabled + if config.tls_internal and config.tls_external: + cmd.extend( + [ + "--clusterAuthMode=x509", + "--tlsAllowInvalidCertificates", + f"--tlsClusterCAFile={full_conf_dir}/{TLS_INT_CA_FILE}", + f"--tlsClusterFile={full_conf_dir}/{TLS_INT_PEM_FILE}", + ] + ) + + if role == Config.Role.CONFIG_SERVER: + cmd.append("--configsvr") + + if role == Config.Role.SHARD: + cmd.append("--shardsvr") + + cmd.append("\n") + return " ".join(cmd) + + +def generate_password() -> str: + """Generate a random password string. + + Returns: + A random password string. + """ + choices = string.ascii_letters + string.digits + return "".join([secrets.choice(choices) for _ in range(32)]) + + +def generate_keyfile() -> str: + """Key file used for authentication between replica set peers. + + Returns: + A maximum allowed random string. 
+ """ + choices = string.ascii_letters + string.digits + return "".join([secrets.choice(choices) for _ in range(1024)]) + + +def copy_licenses_to_unit(): + """Copies licenses packaged in the snap to the charm's licenses directory.""" + os.makedirs("src/licenses", exist_ok=True) + subprocess.check_output("cp LICENSE src/licenses/LICENSE-charm", shell=True) + subprocess.check_output( + "cp -r /snap/charmed-mongodb/current/licenses/* src/licenses", shell=True + ) + + +def current_pbm_op(pbm_status: str) -> str: + """Parses pbm status for the operation that pbm is running.""" + pbm_status = json.loads(pbm_status) + return pbm_status["running"] if "running" in pbm_status else "" + + +def process_pbm_status(pbm_status: str) -> StatusBase: + """Parses current pbm operation and returns unit status.""" + current_op = current_pbm_op(pbm_status) + # no operations are currently running with pbm + if current_op == {}: + return ActiveStatus("") + + if current_op["type"] == "backup": + backup_id = current_op["name"] + return MaintenanceStatus(f"backup started/running, backup id:'{backup_id}'") + + if current_op["type"] == "restore": + backup_id = current_op["name"] + return MaintenanceStatus(f"restore started/running, backup id:'{backup_id}'") + + if current_op["type"] == "resync": + return WaitingStatus("waiting to sync s3 configurations.") + + return ActiveStatus() + + +def add_args_to_env(var: str, args: str): + """Adds the provided arguments to the environment as the provided variable.""" + with open(Config.ENV_VAR_PATH, "r") as env_var_file: + env_vars = env_var_file.readlines() + + args_added = False + for index, line in enumerate(env_vars): + if var in line: + args_added = True + env_vars[index] = f"{var}={args}" + + # if it is the first time adding these args to the file - we will need to append them to the + # file + if not args_added: + env_vars.append(f"{var}={args}") + + with open(Config.ENV_VAR_PATH, "w") as service_file: + service_file.writelines(env_vars) diff 
--git a/lib/charms/tls_certificates_interface/v3/tls_certificates.py b/lib/charms/tls_certificates_interface/v3/tls_certificates.py new file mode 100644 index 00000000..dcc6d94d --- /dev/null +++ b/lib/charms/tls_certificates_interface/v3/tls_certificates.py @@ -0,0 +1,2009 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + + +"""Library for the tls-certificates relation. + +This library contains the Requires and Provides classes for handling the tls-certificates +interface. + +Pre-requisites: + - Juju >= 3.0 + +## Getting Started +From a charm directory, fetch the library using `charmcraft`: + +```shell +charmcraft fetch-lib charms.tls_certificates_interface.v3.tls_certificates +``` + +Add the following libraries to the charm's `requirements.txt` file: +- jsonschema +- cryptography >= 42.0.0 + +Add the following section to the charm's `charmcraft.yaml` file: +```yaml +parts: + charm: + build-packages: + - libffi-dev + - libssl-dev + - rustc + - cargo +``` + +### Provider charm +The provider charm is the charm providing certificates to another charm that requires them. In +this example, the provider charm is storing its private key using a peer relation interface called +`replicas`. 
+ +Example: +```python +from charms.tls_certificates_interface.v3.tls_certificates import ( + CertificateCreationRequestEvent, + CertificateRevocationRequestEvent, + TLSCertificatesProvidesV3, + generate_private_key, +) +from ops.charm import CharmBase, InstallEvent +from ops.main import main +from ops.model import ActiveStatus, WaitingStatus + + +def generate_ca(private_key: bytes, subject: str) -> str: + return "whatever ca content" + + +def generate_certificate(ca: str, private_key: str, csr: str) -> str: + return "Whatever certificate" + + +class ExampleProviderCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.certificates = TLSCertificatesProvidesV3(self, "certificates") + self.framework.observe( + self.certificates.on.certificate_request, + self._on_certificate_request + ) + self.framework.observe( + self.certificates.on.certificate_revocation_request, + self._on_certificate_revocation_request + ) + self.framework.observe(self.on.install, self._on_install) + + def _on_install(self, event: InstallEvent) -> None: + private_key_password = b"banana" + private_key = generate_private_key(password=private_key_password) + ca_certificate = generate_ca(private_key=private_key, subject="whatever") + replicas_relation = self.model.get_relation("replicas") + if not replicas_relation: + self.unit.status = WaitingStatus("Waiting for peer relation to be created") + event.defer() + return + replicas_relation.data[self.app].update( + { + "private_key_password": "banana", + "private_key": private_key, + "ca_certificate": ca_certificate, + } + ) + self.unit.status = ActiveStatus() + + def _on_certificate_request(self, event: CertificateCreationRequestEvent) -> None: + replicas_relation = self.model.get_relation("replicas") + if not replicas_relation: + self.unit.status = WaitingStatus("Waiting for peer relation to be created") + event.defer() + return + ca_certificate = replicas_relation.data[self.app].get("ca_certificate") + private_key = 
replicas_relation.data[self.app].get("private_key") + certificate = generate_certificate( + ca=ca_certificate, + private_key=private_key, + csr=event.certificate_signing_request, + ) + + self.certificates.set_relation_certificate( + certificate=certificate, + certificate_signing_request=event.certificate_signing_request, + ca=ca_certificate, + chain=[ca_certificate, certificate], + relation_id=event.relation_id, + recommended_expiry_notification_time=720, + ) + + def _on_certificate_revocation_request(self, event: CertificateRevocationRequestEvent) -> None: + # Do what you want to do with this information + pass + + +if __name__ == "__main__": + main(ExampleProviderCharm) +``` + +### Requirer charm +The requirer charm is the charm requiring certificates from another charm that provides them. In +this example, the requirer charm is storing its certificates using a peer relation interface called +`replicas`. + +Example: +```python +from charms.tls_certificates_interface.v3.tls_certificates import ( + CertificateAvailableEvent, + CertificateExpiringEvent, + CertificateRevokedEvent, + TLSCertificatesRequiresV3, + generate_csr, + generate_private_key, +) +from ops.charm import CharmBase, RelationCreatedEvent +from ops.main import main +from ops.model import ActiveStatus, WaitingStatus +from typing import Union + + +class ExampleRequirerCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.cert_subject = "whatever" + self.certificates = TLSCertificatesRequiresV3(self, "certificates") + self.framework.observe(self.on.install, self._on_install) + self.framework.observe( + self.on.certificates_relation_created, self._on_certificates_relation_created + ) + self.framework.observe( + self.certificates.on.certificate_available, self._on_certificate_available + ) + self.framework.observe( + self.certificates.on.certificate_expiring, self._on_certificate_expiring + ) + self.framework.observe( + self.certificates.on.certificate_invalidated, 
self._on_certificate_invalidated + ) + self.framework.observe( + self.certificates.on.all_certificates_invalidated, + self._on_all_certificates_invalidated + ) + + def _on_install(self, event) -> None: + private_key_password = b"banana" + private_key = generate_private_key(password=private_key_password) + replicas_relation = self.model.get_relation("replicas") + if not replicas_relation: + self.unit.status = WaitingStatus("Waiting for peer relation to be created") + event.defer() + return + replicas_relation.data[self.app].update( + {"private_key_password": "banana", "private_key": private_key.decode()} + ) + + def _on_certificates_relation_created(self, event: RelationCreatedEvent) -> None: + replicas_relation = self.model.get_relation("replicas") + if not replicas_relation: + self.unit.status = WaitingStatus("Waiting for peer relation to be created") + event.defer() + return + private_key_password = replicas_relation.data[self.app].get("private_key_password") + private_key = replicas_relation.data[self.app].get("private_key") + csr = generate_csr( + private_key=private_key.encode(), + private_key_password=private_key_password.encode(), + subject=self.cert_subject, + ) + replicas_relation.data[self.app].update({"csr": csr.decode()}) + self.certificates.request_certificate_creation(certificate_signing_request=csr) + + def _on_certificate_available(self, event: CertificateAvailableEvent) -> None: + replicas_relation = self.model.get_relation("replicas") + if not replicas_relation: + self.unit.status = WaitingStatus("Waiting for peer relation to be created") + event.defer() + return + replicas_relation.data[self.app].update({"certificate": event.certificate}) + replicas_relation.data[self.app].update({"ca": event.ca}) + replicas_relation.data[self.app].update({"chain": event.chain}) + self.unit.status = ActiveStatus() + + def _on_certificate_expiring( + self, event: Union[CertificateExpiringEvent, CertificateInvalidatedEvent] + ) -> None: + replicas_relation = 
self.model.get_relation("replicas") + if not replicas_relation: + self.unit.status = WaitingStatus("Waiting for peer relation to be created") + event.defer() + return + old_csr = replicas_relation.data[self.app].get("csr") + private_key_password = replicas_relation.data[self.app].get("private_key_password") + private_key = replicas_relation.data[self.app].get("private_key") + new_csr = generate_csr( + private_key=private_key.encode(), + private_key_password=private_key_password.encode(), + subject=self.cert_subject, + ) + self.certificates.request_certificate_renewal( + old_certificate_signing_request=old_csr, + new_certificate_signing_request=new_csr, + ) + replicas_relation.data[self.app].update({"csr": new_csr.decode()}) + + def _certificate_revoked(self) -> None: + old_csr = replicas_relation.data[self.app].get("csr") + private_key_password = replicas_relation.data[self.app].get("private_key_password") + private_key = replicas_relation.data[self.app].get("private_key") + new_csr = generate_csr( + private_key=private_key.encode(), + private_key_password=private_key_password.encode(), + subject=self.cert_subject, + ) + self.certificates.request_certificate_renewal( + old_certificate_signing_request=old_csr, + new_certificate_signing_request=new_csr, + ) + replicas_relation.data[self.app].update({"csr": new_csr.decode()}) + replicas_relation.data[self.app].pop("certificate") + replicas_relation.data[self.app].pop("ca") + replicas_relation.data[self.app].pop("chain") + self.unit.status = WaitingStatus("Waiting for new certificate") + + def _on_certificate_invalidated(self, event: CertificateInvalidatedEvent) -> None: + replicas_relation = self.model.get_relation("replicas") + if not replicas_relation: + self.unit.status = WaitingStatus("Waiting for peer relation to be created") + event.defer() + return + if event.reason == "revoked": + self._certificate_revoked() + if event.reason == "expired": + self._on_certificate_expiring(event) + + def 
_on_all_certificates_invalidated(self, event: AllCertificatesInvalidatedEvent) -> None: + # Do what you want with this information, probably remove all certificates. + pass + + +if __name__ == "__main__": + main(ExampleRequirerCharm) +``` + +You can relate both charms by running: + +```bash +juju relate +``` + +""" # noqa: D405, D410, D411, D214, D416 + +import copy +import ipaddress +import json +import logging +import uuid +from contextlib import suppress +from dataclasses import dataclass +from datetime import datetime, timedelta, timezone +from typing import List, Literal, Optional, Union + +from cryptography import x509 +from cryptography.hazmat._oid import ExtensionOID +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import rsa +from jsonschema import exceptions, validate +from ops.charm import ( + CharmBase, + CharmEvents, + RelationBrokenEvent, + RelationChangedEvent, + SecretExpiredEvent, +) +from ops.framework import EventBase, EventSource, Handle, Object +from ops.jujuversion import JujuVersion +from ops.model import ( + Application, + ModelError, + Relation, + RelationDataContent, + SecretNotFoundError, + Unit, +) + +# The unique Charmhub library identifier, never change it +LIBID = "afd8c2bccf834997afce12c2706d2ede" + +# Increment this major API version when introducing breaking changes +LIBAPI = 3 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 17 + +PYDEPS = ["cryptography", "jsonschema"] + +REQUIRER_JSON_SCHEMA = { + "$schema": "http://json-schema.org/draft-04/schema#", + "$id": "https://canonical.github.io/charm-relation-interfaces/interfaces/tls_certificates/v1/schemas/requirer.json", + "type": "object", + "title": "`tls_certificates` requirer root schema", + "description": "The `tls_certificates` root schema comprises the entire requirer databag for this interface.", # noqa: E501 + 
"examples": [ + { + "certificate_signing_requests": [ + { + "certificate_signing_request": "-----BEGIN CERTIFICATE REQUEST-----\\nMIICWjCCAUICAQAwFTETMBEGA1UEAwwKYmFuYW5hLmNvbTCCASIwDQYJKoZIhvcN\\nAQEBBQADggEPADCCAQoCggEBANWlx9wE6cW7Jkb4DZZDOZoEjk1eDBMJ+8R4pyKp\\nFBeHMl1SQSDt6rAWsrfL3KOGiIHqrRY0B5H6c51L8LDuVrJG0bPmyQ6rsBo3gVke\\nDSivfSLtGvHtp8lwYnIunF8r858uYmblAR0tdXQNmnQvm+6GERvURQ6sxpgZ7iLC\\npPKDoPt+4GKWL10FWf0i82FgxWC2KqRZUtNbgKETQuARLig7etBmCnh20zmynorA\\ncY7vrpTPAaeQpGLNqqYvKV9W6yWVY08V+nqARrFrjk3vSioZSu8ZJUdZ4d9++SGl\\nbH7A6e77YDkX9i/dQ3Pa/iDtWO3tXS2MvgoxX1iSWlGNOHcCAwEAAaAAMA0GCSqG\\nSIb3DQEBCwUAA4IBAQCW1fKcHessy/ZhnIwAtSLznZeZNH8LTVOzkhVd4HA7EJW+\\nKVLBx8DnN7L3V2/uPJfHiOg4Rx7fi7LkJPegl3SCqJZ0N5bQS/KvDTCyLG+9E8Y+\\n7wqCmWiXaH1devimXZvazilu4IC2dSks2D8DPWHgsOdVks9bme8J3KjdNMQudegc\\newWZZ1Dtbd+Rn7cpKU3jURMwm4fRwGxbJ7iT5fkLlPBlyM/yFEik4SmQxFYrZCQg\\n0f3v4kBefTh5yclPy5tEH+8G0LMsbbo3dJ5mPKpAShi0QEKDLd7eR1R/712lYTK4\\ndi4XaEfqERgy68O4rvb4PGlJeRGS7AmL7Ss8wfAq\\n-----END CERTIFICATE REQUEST-----\\n" # noqa: E501 + }, + { + "certificate_signing_request": "-----BEGIN CERTIFICATE REQUEST-----\\nMIICWjCCAUICAQAwFTETMBEGA1UEAwwKYmFuYW5hLmNvbTCCASIwDQYJKoZIhvcN\\nAQEBBQADggEPADCCAQoCggEBAMk3raaX803cHvzlBF9LC7KORT46z4VjyU5PIaMb\\nQLIDgYKFYI0n5hf2Ra4FAHvOvEmW7bjNlHORFEmvnpcU5kPMNUyKFMTaC8LGmN8z\\nUBH3aK+0+FRvY4afn9tgj5435WqOG9QdoDJ0TJkjJbJI9M70UOgL711oU7ql6HxU\\n4d2ydFK9xAHrBwziNHgNZ72L95s4gLTXf0fAHYf15mDA9U5yc+YDubCKgTXzVySQ\\nUx73VCJLfC/XkZIh559IrnRv5G9fu6BMLEuBwAz6QAO4+/XidbKWN4r2XSq5qX4n\\n6EPQQWP8/nd4myq1kbg6Q8w68L/0YdfjCmbyf2TuoWeImdUCAwEAAaAAMA0GCSqG\\nSIb3DQEBCwUAA4IBAQBIdwraBvpYo/rl5MH1+1Um6HRg4gOdQPY5WcJy9B9tgzJz\\nittRSlRGTnhyIo6fHgq9KHrmUthNe8mMTDailKFeaqkVNVvk7l0d1/B90Kz6OfmD\\nxN0qjW53oP7y3QB5FFBM8DjqjmUnz5UePKoX4AKkDyrKWxMwGX5RoET8c/y0y9jp\\nvSq3Wh5UpaZdWbe1oVY8CqMVUEVQL2DPjtopxXFz2qACwsXkQZxWmjvZnRiP8nP8\\nbdFaEuh9Q6rZ2QdZDEtrU4AodPU3NaukFr5KlTUQt3w/cl+5//zils6G5zUWJ2pN\\ng7+t9PTvXHRkH+LnwaVnmsBFU2e05qADQbfIn7JA\\n-----END CERTIFICATE REQUEST-----\\n" # 
noqa: E501 + }, + ] + } + ], + "properties": { + "certificate_signing_requests": { + "type": "array", + "items": { + "type": "object", + "properties": { + "certificate_signing_request": {"type": "string"}, + "ca": {"type": "boolean"}, + }, + "required": ["certificate_signing_request"], + }, + } + }, + "required": ["certificate_signing_requests"], + "additionalProperties": True, +} + +PROVIDER_JSON_SCHEMA = { + "$schema": "http://json-schema.org/draft-04/schema#", + "$id": "https://canonical.github.io/charm-relation-interfaces/interfaces/tls_certificates/v1/schemas/provider.json", + "type": "object", + "title": "`tls_certificates` provider root schema", + "description": "The `tls_certificates` root schema comprises the entire provider databag for this interface.", # noqa: E501 + "examples": [ + { + "certificates": [ + { + "ca": "-----BEGIN CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\
n-----END CERTIFICATE-----\\n", # noqa: E501 + "chain": [ + "-----BEGIN CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\n-----END CERTIFICATE-----\\n" # noqa: E501, W505 + ], + "certificate_signing_request": "-----BEGIN CERTIFICATE 
REQUEST-----\nMIICWjCCAUICAQAwFTETMBEGA1UEAwwKYmFuYW5hLmNvbTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBANWlx9wE6cW7Jkb4DZZDOZoEjk1eDBMJ+8R4pyKp\nFBeHMl1SQSDt6rAWsrfL3KOGiIHqrRY0B5H6c51L8LDuVrJG0bPmyQ6rsBo3gVke\nDSivfSLtGvHtp8lwYnIunF8r858uYmblAR0tdXQNmnQvm+6GERvURQ6sxpgZ7iLC\npPKDoPt+4GKWL10FWf0i82FgxWC2KqRZUtNbgKETQuARLig7etBmCnh20zmynorA\ncY7vrpTPAaeQpGLNqqYvKV9W6yWVY08V+nqARrFrjk3vSioZSu8ZJUdZ4d9++SGl\nbH7A6e77YDkX9i/dQ3Pa/iDtWO3tXS2MvgoxX1iSWlGNOHcCAwEAAaAAMA0GCSqG\nSIb3DQEBCwUAA4IBAQCW1fKcHessy/ZhnIwAtSLznZeZNH8LTVOzkhVd4HA7EJW+\nKVLBx8DnN7L3V2/uPJfHiOg4Rx7fi7LkJPegl3SCqJZ0N5bQS/KvDTCyLG+9E8Y+\n7wqCmWiXaH1devimXZvazilu4IC2dSks2D8DPWHgsOdVks9bme8J3KjdNMQudegc\newWZZ1Dtbd+Rn7cpKU3jURMwm4fRwGxbJ7iT5fkLlPBlyM/yFEik4SmQxFYrZCQg\n0f3v4kBefTh5yclPy5tEH+8G0LMsbbo3dJ5mPKpAShi0QEKDLd7eR1R/712lYTK4\ndi4XaEfqERgy68O4rvb4PGlJeRGS7AmL7Ss8wfAq\n-----END CERTIFICATE REQUEST-----\n", # noqa: E501 + "certificate": "-----BEGIN CERTIFICATE-----\nMIICvDCCAaQCFFPAOD7utDTsgFrm0vS4We18OcnKMA0GCSqGSIb3DQEBCwUAMCAx\nCzAJBgNVBAYTAlVTMREwDwYDVQQDDAh3aGF0ZXZlcjAeFw0yMjA3MjkyMTE5Mzha\nFw0yMzA3MjkyMTE5MzhaMBUxEzARBgNVBAMMCmJhbmFuYS5jb20wggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVpcfcBOnFuyZG+A2WQzmaBI5NXgwTCfvE\neKciqRQXhzJdUkEg7eqwFrK3y9yjhoiB6q0WNAeR+nOdS/Cw7layRtGz5skOq7Aa\nN4FZHg0or30i7Rrx7afJcGJyLpxfK/OfLmJm5QEdLXV0DZp0L5vuhhEb1EUOrMaY\nGe4iwqTyg6D7fuBili9dBVn9IvNhYMVgtiqkWVLTW4ChE0LgES4oO3rQZgp4dtM5\nsp6KwHGO766UzwGnkKRizaqmLylfVusllWNPFfp6gEaxa45N70oqGUrvGSVHWeHf\nfvkhpWx+wOnu+2A5F/Yv3UNz2v4g7Vjt7V0tjL4KMV9YklpRjTh3AgMBAAEwDQYJ\nKoZIhvcNAQELBQADggEBAChjRzuba8zjQ7NYBVas89Oy7u++MlS8xWxh++yiUsV6\nWMk3ZemsPtXc1YmXorIQohtxLxzUPm2JhyzFzU/sOLmJQ1E/l+gtZHyRCwsb20fX\nmphuJsMVd7qv/GwEk9PBsk2uDqg4/Wix0Rx5lf95juJP7CPXQJl5FQauf3+LSz0y\nwF/j+4GqvrwsWr9hKOLmPdkyKkR6bHKtzzsxL9PM8GnElk2OpaPMMnzbL/vt2IAt\nxK01ZzPxCQCzVwHo5IJO5NR/fIyFbEPhxzG17QsRDOBR9fl9cOIvDeSO04vyZ+nz\n+kA2c3fNrZFAtpIlOOmFh8Q12rVL4sAjI5mVWnNEgvI=\n-----END CERTIFICATE-----\n", # noqa: E501 + } + ] + }, + { + "certificates": [ + { 
+ "ca": "-----BEGIN CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\n-----END CERTIFICATE-----\\n", # noqa: E501 + "chain": [ + "-----BEGIN 
CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\n-----END CERTIFICATE-----\\n" # noqa: E501, W505 + ], + "certificate_signing_request": "-----BEGIN CERTIFICATE 
REQUEST-----\nMIICWjCCAUICAQAwFTETMBEGA1UEAwwKYmFuYW5hLmNvbTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBANWlx9wE6cW7Jkb4DZZDOZoEjk1eDBMJ+8R4pyKp\nFBeHMl1SQSDt6rAWsrfL3KOGiIHqrRY0B5H6c51L8LDuVrJG0bPmyQ6rsBo3gVke\nDSivfSLtGvHtp8lwYnIunF8r858uYmblAR0tdXQNmnQvm+6GERvURQ6sxpgZ7iLC\npPKDoPt+4GKWL10FWf0i82FgxWC2KqRZUtNbgKETQuARLig7etBmCnh20zmynorA\ncY7vrpTPAaeQpGLNqqYvKV9W6yWVY08V+nqARrFrjk3vSioZSu8ZJUdZ4d9++SGl\nbH7A6e77YDkX9i/dQ3Pa/iDtWO3tXS2MvgoxX1iSWlGNOHcCAwEAAaAAMA0GCSqG\nSIb3DQEBCwUAA4IBAQCW1fKcHessy/ZhnIwAtSLznZeZNH8LTVOzkhVd4HA7EJW+\nKVLBx8DnN7L3V2/uPJfHiOg4Rx7fi7LkJPegl3SCqJZ0N5bQS/KvDTCyLG+9E8Y+\n7wqCmWiXaH1devimXZvazilu4IC2dSks2D8DPWHgsOdVks9bme8J3KjdNMQudegc\newWZZ1Dtbd+Rn7cpKU3jURMwm4fRwGxbJ7iT5fkLlPBlyM/yFEik4SmQxFYrZCQg\n0f3v4kBefTh5yclPy5tEH+8G0LMsbbo3dJ5mPKpAShi0QEKDLd7eR1R/712lYTK4\ndi4XaEfqERgy68O4rvb4PGlJeRGS7AmL7Ss8wfAq\n-----END CERTIFICATE REQUEST-----\n", # noqa: E501 + "certificate": "-----BEGIN CERTIFICATE-----\nMIICvDCCAaQCFFPAOD7utDTsgFrm0vS4We18OcnKMA0GCSqGSIb3DQEBCwUAMCAx\nCzAJBgNVBAYTAlVTMREwDwYDVQQDDAh3aGF0ZXZlcjAeFw0yMjA3MjkyMTE5Mzha\nFw0yMzA3MjkyMTE5MzhaMBUxEzARBgNVBAMMCmJhbmFuYS5jb20wggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVpcfcBOnFuyZG+A2WQzmaBI5NXgwTCfvE\neKciqRQXhzJdUkEg7eqwFrK3y9yjhoiB6q0WNAeR+nOdS/Cw7layRtGz5skOq7Aa\nN4FZHg0or30i7Rrx7afJcGJyLpxfK/OfLmJm5QEdLXV0DZp0L5vuhhEb1EUOrMaY\nGe4iwqTyg6D7fuBili9dBVn9IvNhYMVgtiqkWVLTW4ChE0LgES4oO3rQZgp4dtM5\nsp6KwHGO766UzwGnkKRizaqmLylfVusllWNPFfp6gEaxa45N70oqGUrvGSVHWeHf\nfvkhpWx+wOnu+2A5F/Yv3UNz2v4g7Vjt7V0tjL4KMV9YklpRjTh3AgMBAAEwDQYJ\nKoZIhvcNAQELBQADggEBAChjRzuba8zjQ7NYBVas89Oy7u++MlS8xWxh++yiUsV6\nWMk3ZemsPtXc1YmXorIQohtxLxzUPm2JhyzFzU/sOLmJQ1E/l+gtZHyRCwsb20fX\nmphuJsMVd7qv/GwEk9PBsk2uDqg4/Wix0Rx5lf95juJP7CPXQJl5FQauf3+LSz0y\nwF/j+4GqvrwsWr9hKOLmPdkyKkR6bHKtzzsxL9PM8GnElk2OpaPMMnzbL/vt2IAt\nxK01ZzPxCQCzVwHo5IJO5NR/fIyFbEPhxzG17QsRDOBR9fl9cOIvDeSO04vyZ+nz\n+kA2c3fNrZFAtpIlOOmFh8Q12rVL4sAjI5mVWnNEgvI=\n-----END CERTIFICATE-----\n", # noqa: E501 + "revoked": True, + } + ] + }, + ], + 
"properties": { + "certificates": { + "$id": "#/properties/certificates", + "type": "array", + "items": { + "$id": "#/properties/certificates/items", + "type": "object", + "required": ["certificate_signing_request", "certificate", "ca", "chain"], + "properties": { + "certificate_signing_request": { + "$id": "#/properties/certificates/items/certificate_signing_request", + "type": "string", + }, + "certificate": { + "$id": "#/properties/certificates/items/certificate", + "type": "string", + }, + "ca": {"$id": "#/properties/certificates/items/ca", "type": "string"}, + "chain": { + "$id": "#/properties/certificates/items/chain", + "type": "array", + "items": { + "type": "string", + "$id": "#/properties/certificates/items/chain/items", + }, + }, + "revoked": { + "$id": "#/properties/certificates/items/revoked", + "type": "boolean", + }, + }, + "additionalProperties": True, + }, + } + }, + "required": ["certificates"], + "additionalProperties": True, +} + + +logger = logging.getLogger(__name__) + + +@dataclass +class RequirerCSR: + """This class represents a certificate signing request from an interface Requirer.""" + + relation_id: int + application_name: str + unit_name: str + csr: str + is_ca: bool + + +@dataclass +class ProviderCertificate: + """This class represents a certificate from an interface Provider.""" + + relation_id: int + application_name: str + csr: str + certificate: str + ca: str + chain: List[str] + revoked: bool + expiry_time: datetime + expiry_notification_time: Optional[datetime] = None + + def chain_as_pem(self) -> str: + """Return full certificate chain as a PEM string.""" + return "\n\n".join(reversed(self.chain)) + + def to_json(self) -> str: + """Return the object as a JSON string. 
+ + Returns: + str: JSON representation of the object + """ + return json.dumps( + { + "relation_id": self.relation_id, + "application_name": self.application_name, + "csr": self.csr, + "certificate": self.certificate, + "ca": self.ca, + "chain": self.chain, + "revoked": self.revoked, + "expiry_time": self.expiry_time.isoformat(), + "expiry_notification_time": self.expiry_notification_time.isoformat() + if self.expiry_notification_time + else None, + } + ) + + +class CertificateAvailableEvent(EventBase): + """Charm Event triggered when a TLS certificate is available.""" + + def __init__( + self, + handle: Handle, + certificate: str, + certificate_signing_request: str, + ca: str, + chain: List[str], + ): + super().__init__(handle) + self.certificate = certificate + self.certificate_signing_request = certificate_signing_request + self.ca = ca + self.chain = chain + + def snapshot(self) -> dict: + """Return snapshot.""" + return { + "certificate": self.certificate, + "certificate_signing_request": self.certificate_signing_request, + "ca": self.ca, + "chain": self.chain, + } + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.certificate = snapshot["certificate"] + self.certificate_signing_request = snapshot["certificate_signing_request"] + self.ca = snapshot["ca"] + self.chain = snapshot["chain"] + + def chain_as_pem(self) -> str: + """Return full certificate chain as a PEM string.""" + return "\n\n".join(reversed(self.chain)) + + +class CertificateExpiringEvent(EventBase): + """Charm Event triggered when a TLS certificate is almost expired.""" + + def __init__(self, handle, certificate: str, expiry: str): + """CertificateExpiringEvent. + + Args: + handle (Handle): Juju framework handle + certificate (str): TLS Certificate + expiry (str): Datetime string representing the time at which the certificate + won't be valid anymore. 
+ """ + super().__init__(handle) + self.certificate = certificate + self.expiry = expiry + + def snapshot(self) -> dict: + """Return snapshot.""" + return {"certificate": self.certificate, "expiry": self.expiry} + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.certificate = snapshot["certificate"] + self.expiry = snapshot["expiry"] + + +class CertificateInvalidatedEvent(EventBase): + """Charm Event triggered when a TLS certificate is invalidated.""" + + def __init__( + self, + handle: Handle, + reason: Literal["expired", "revoked"], + certificate: str, + certificate_signing_request: str, + ca: str, + chain: List[str], + ): + super().__init__(handle) + self.reason = reason + self.certificate_signing_request = certificate_signing_request + self.certificate = certificate + self.ca = ca + self.chain = chain + + def snapshot(self) -> dict: + """Return snapshot.""" + return { + "reason": self.reason, + "certificate_signing_request": self.certificate_signing_request, + "certificate": self.certificate, + "ca": self.ca, + "chain": self.chain, + } + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.reason = snapshot["reason"] + self.certificate_signing_request = snapshot["certificate_signing_request"] + self.certificate = snapshot["certificate"] + self.ca = snapshot["ca"] + self.chain = snapshot["chain"] + + +class AllCertificatesInvalidatedEvent(EventBase): + """Charm Event triggered when all TLS certificates are invalidated.""" + + def __init__(self, handle: Handle): + super().__init__(handle) + + def snapshot(self) -> dict: + """Return snapshot.""" + return {} + + def restore(self, snapshot: dict): + """Restore snapshot.""" + pass + + +class CertificateCreationRequestEvent(EventBase): + """Charm Event triggered when a TLS certificate is required.""" + + def __init__( + self, + handle: Handle, + certificate_signing_request: str, + relation_id: int, + is_ca: bool = False, + ): + super().__init__(handle) + 
self.certificate_signing_request = certificate_signing_request + self.relation_id = relation_id + self.is_ca = is_ca + + def snapshot(self) -> dict: + """Return snapshot.""" + return { + "certificate_signing_request": self.certificate_signing_request, + "relation_id": self.relation_id, + "is_ca": self.is_ca, + } + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.certificate_signing_request = snapshot["certificate_signing_request"] + self.relation_id = snapshot["relation_id"] + self.is_ca = snapshot["is_ca"] + + +class CertificateRevocationRequestEvent(EventBase): + """Charm Event triggered when a TLS certificate needs to be revoked.""" + + def __init__( + self, + handle: Handle, + certificate: str, + certificate_signing_request: str, + ca: str, + chain: str, + ): + super().__init__(handle) + self.certificate = certificate + self.certificate_signing_request = certificate_signing_request + self.ca = ca + self.chain = chain + + def snapshot(self) -> dict: + """Return snapshot.""" + return { + "certificate": self.certificate, + "certificate_signing_request": self.certificate_signing_request, + "ca": self.ca, + "chain": self.chain, + } + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.certificate = snapshot["certificate"] + self.certificate_signing_request = snapshot["certificate_signing_request"] + self.ca = snapshot["ca"] + self.chain = snapshot["chain"] + + +def _load_relation_data(relation_data_content: RelationDataContent) -> dict: + """Load relation data from the relation data bag. + + Json loads all data. + + Args: + relation_data_content: Relation data from the databag + + Returns: + dict: Relation data in dict format. 
+ """ + certificate_data = {} + try: + for key in relation_data_content: + try: + certificate_data[key] = json.loads(relation_data_content[key]) + except (json.decoder.JSONDecodeError, TypeError): + certificate_data[key] = relation_data_content[key] + except ModelError: + pass + return certificate_data + + +def _get_closest_future_time( + expiry_notification_time: datetime, expiry_time: datetime +) -> datetime: + """Return expiry_notification_time if not in the past, otherwise return expiry_time. + + Args: + expiry_notification_time (datetime): Notification time of impending expiration + expiry_time (datetime): Expiration time + + Returns: + datetime: expiry_notification_time if not in the past, expiry_time otherwise + """ + return ( + expiry_notification_time + if datetime.now(timezone.utc) < expiry_notification_time + else expiry_time + ) + + +def calculate_expiry_notification_time( + validity_start_time: datetime, + expiry_time: datetime, + provider_recommended_notification_time: Optional[int], + requirer_recommended_notification_time: Optional[int], +) -> datetime: + """Calculate a reasonable time to notify the user about the certificate expiry. + + It takes into account the time recommended by the provider and by the requirer. + Time recommended by the provider is preferred, + then time recommended by the requirer, + then dynamically calculated time. + + Args: + validity_start_time: Certificate validity time + expiry_time: Certificate expiry time + provider_recommended_notification_time: + Time in hours prior to expiry to notify the user. + Recommended by the provider. + requirer_recommended_notification_time: + Time in hours prior to expiry to notify the user. + Recommended by the requirer. + + Returns: + datetime: Time to notify the user about the certificate expiry. 
+ """ + if provider_recommended_notification_time is not None: + provider_recommended_notification_time = abs(provider_recommended_notification_time) + provider_recommendation_time_delta = expiry_time - timedelta( + hours=provider_recommended_notification_time + ) + if validity_start_time < provider_recommendation_time_delta: + return provider_recommendation_time_delta + + if requirer_recommended_notification_time is not None: + requirer_recommended_notification_time = abs(requirer_recommended_notification_time) + requirer_recommendation_time_delta = expiry_time - timedelta( + hours=requirer_recommended_notification_time + ) + if validity_start_time < requirer_recommendation_time_delta: + return requirer_recommendation_time_delta + calculated_hours = (expiry_time - validity_start_time).total_seconds() / (3600 * 3) + return expiry_time - timedelta(hours=calculated_hours) + + +def generate_ca( + private_key: bytes, + subject: str, + private_key_password: Optional[bytes] = None, + validity: int = 365, + country: str = "US", +) -> bytes: + """Generate a CA Certificate. + + Args: + private_key (bytes): Private key + subject (str): Common Name that can be an IP or a Full Qualified Domain Name (FQDN). + private_key_password (bytes): Private key password + validity (int): Certificate validity time (in days) + country (str): Certificate Issuing country + + Returns: + bytes: CA Certificate. 
+ """ + private_key_object = serialization.load_pem_private_key( + private_key, password=private_key_password + ) + subject_name = x509.Name( + [ + x509.NameAttribute(x509.NameOID.COUNTRY_NAME, country), + x509.NameAttribute(x509.NameOID.COMMON_NAME, subject), + ] + ) + subject_identifier_object = x509.SubjectKeyIdentifier.from_public_key( + private_key_object.public_key() # type: ignore[arg-type] + ) + subject_identifier = key_identifier = subject_identifier_object.public_bytes() + key_usage = x509.KeyUsage( + digital_signature=True, + key_encipherment=True, + key_cert_sign=True, + key_agreement=False, + content_commitment=False, + data_encipherment=False, + crl_sign=False, + encipher_only=False, + decipher_only=False, + ) + cert = ( + x509.CertificateBuilder() + .subject_name(subject_name) + .issuer_name(subject_name) + .public_key(private_key_object.public_key()) # type: ignore[arg-type] + .serial_number(x509.random_serial_number()) + .not_valid_before(datetime.now(timezone.utc)) + .not_valid_after(datetime.now(timezone.utc) + timedelta(days=validity)) + .add_extension(x509.SubjectKeyIdentifier(digest=subject_identifier), critical=False) + .add_extension( + x509.AuthorityKeyIdentifier( + key_identifier=key_identifier, + authority_cert_issuer=None, + authority_cert_serial_number=None, + ), + critical=False, + ) + .add_extension(key_usage, critical=True) + .add_extension( + x509.BasicConstraints(ca=True, path_length=None), + critical=True, + ) + .sign(private_key_object, hashes.SHA256()) # type: ignore[arg-type] + ) + return cert.public_bytes(serialization.Encoding.PEM) + + +def get_certificate_extensions( + authority_key_identifier: bytes, + csr: x509.CertificateSigningRequest, + alt_names: Optional[List[str]], + is_ca: bool, +) -> List[x509.Extension]: + """Generate a list of certificate extensions from a CSR and other known information. 
+ + Args: + authority_key_identifier (bytes): Authority key identifier + csr (x509.CertificateSigningRequest): CSR + alt_names (list): List of alt names to put on cert - prefer putting SANs in CSR + is_ca (bool): Whether the certificate is a CA certificate + + Returns: + List[x509.Extension]: List of extensions + """ + cert_extensions_list: List[x509.Extension] = [ + x509.Extension( + oid=ExtensionOID.AUTHORITY_KEY_IDENTIFIER, + value=x509.AuthorityKeyIdentifier( + key_identifier=authority_key_identifier, + authority_cert_issuer=None, + authority_cert_serial_number=None, + ), + critical=False, + ), + x509.Extension( + oid=ExtensionOID.SUBJECT_KEY_IDENTIFIER, + value=x509.SubjectKeyIdentifier.from_public_key(csr.public_key()), + critical=False, + ), + x509.Extension( + oid=ExtensionOID.BASIC_CONSTRAINTS, + critical=True, + value=x509.BasicConstraints(ca=is_ca, path_length=None), + ), + ] + + sans: List[x509.GeneralName] = [] + san_alt_names = [x509.DNSName(name) for name in alt_names] if alt_names else [] + sans.extend(san_alt_names) + try: + loaded_san_ext = csr.extensions.get_extension_for_class(x509.SubjectAlternativeName) + sans.extend( + [x509.DNSName(name) for name in loaded_san_ext.value.get_values_for_type(x509.DNSName)] + ) + sans.extend( + [x509.IPAddress(ip) for ip in loaded_san_ext.value.get_values_for_type(x509.IPAddress)] + ) + sans.extend( + [ + x509.RegisteredID(oid) + for oid in loaded_san_ext.value.get_values_for_type(x509.RegisteredID) + ] + ) + except x509.ExtensionNotFound: + pass + + if sans: + cert_extensions_list.append( + x509.Extension( + oid=ExtensionOID.SUBJECT_ALTERNATIVE_NAME, + critical=False, + value=x509.SubjectAlternativeName(sans), + ) + ) + + if is_ca: + cert_extensions_list.append( + x509.Extension( + ExtensionOID.KEY_USAGE, + critical=True, + value=x509.KeyUsage( + digital_signature=False, + content_commitment=False, + key_encipherment=False, + data_encipherment=False, + key_agreement=False, + key_cert_sign=True, + 
crl_sign=True, + encipher_only=False, + decipher_only=False, + ), + ) + ) + + existing_oids = {ext.oid for ext in cert_extensions_list} + for extension in csr.extensions: + if extension.oid == ExtensionOID.SUBJECT_ALTERNATIVE_NAME: + continue + if extension.oid in existing_oids: + logger.warning("Extension %s is managed by the TLS provider, ignoring.", extension.oid) + continue + cert_extensions_list.append(extension) + + return cert_extensions_list + + +def generate_certificate( + csr: bytes, + ca: bytes, + ca_key: bytes, + ca_key_password: Optional[bytes] = None, + validity: int = 365, + alt_names: Optional[List[str]] = None, + is_ca: bool = False, +) -> bytes: + """Generate a TLS certificate based on a CSR. + + Args: + csr (bytes): CSR + ca (bytes): CA Certificate + ca_key (bytes): CA private key + ca_key_password: CA private key password + validity (int): Certificate validity (in days) + alt_names (list): List of alt names to put on cert - prefer putting SANs in CSR + is_ca (bool): Whether the certificate is a CA certificate + + Returns: + bytes: Certificate + """ + csr_object = x509.load_pem_x509_csr(csr) + subject = csr_object.subject + ca_pem = x509.load_pem_x509_certificate(ca) + issuer = ca_pem.issuer + private_key = serialization.load_pem_private_key(ca_key, password=ca_key_password) + + certificate_builder = ( + x509.CertificateBuilder() + .subject_name(subject) + .issuer_name(issuer) + .public_key(csr_object.public_key()) + .serial_number(x509.random_serial_number()) + .not_valid_before(datetime.now(timezone.utc)) + .not_valid_after(datetime.now(timezone.utc) + timedelta(days=validity)) + ) + extensions = get_certificate_extensions( + authority_key_identifier=ca_pem.extensions.get_extension_for_class( + x509.SubjectKeyIdentifier + ).value.key_identifier, + csr=csr_object, + alt_names=alt_names, + is_ca=is_ca, + ) + for extension in extensions: + try: + certificate_builder = certificate_builder.add_extension( + extval=extension.value, + 
critical=extension.critical, + ) + except ValueError as e: + logger.warning("Failed to add extension %s: %s", extension.oid, e) + + cert = certificate_builder.sign(private_key, hashes.SHA256()) # type: ignore[arg-type] + return cert.public_bytes(serialization.Encoding.PEM) + + +def generate_private_key( + password: Optional[bytes] = None, + key_size: int = 2048, + public_exponent: int = 65537, +) -> bytes: + """Generate a private key. + + Args: + password (bytes): Password for decrypting the private key + key_size (int): Key size in bytes + public_exponent: Public exponent. + + Returns: + bytes: Private Key + """ + private_key = rsa.generate_private_key( + public_exponent=public_exponent, + key_size=key_size, + ) + key_bytes = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=( + serialization.BestAvailableEncryption(password) + if password + else serialization.NoEncryption() + ), + ) + return key_bytes + + +def generate_csr( # noqa: C901 + private_key: bytes, + subject: str, + add_unique_id_to_subject_name: bool = True, + organization: Optional[str] = None, + email_address: Optional[str] = None, + country_name: Optional[str] = None, + state_or_province_name: Optional[str] = None, + locality_name: Optional[str] = None, + private_key_password: Optional[bytes] = None, + sans: Optional[List[str]] = None, + sans_oid: Optional[List[str]] = None, + sans_ip: Optional[List[str]] = None, + sans_dns: Optional[List[str]] = None, + additional_critical_extensions: Optional[List] = None, +) -> bytes: + """Generate a CSR using private key and subject. + + Args: + private_key (bytes): Private key + subject (str): CSR Common Name that can be an IP or a Full Qualified Domain Name (FQDN). + add_unique_id_to_subject_name (bool): Whether a unique ID must be added to the CSR's + subject name. 
Always leave to "True" when the CSR is used to request certificates + using the tls-certificates relation. + organization (str): Name of organization. + email_address (str): Email address. + country_name (str): Country Name. + state_or_province_name (str): State or Province Name. + locality_name (str): Locality Name. + private_key_password (bytes): Private key password + sans (list): Use sans_dns - this will be deprecated in a future release + List of DNS subject alternative names (keeping it for now for backward compatibility) + sans_oid (list): List of registered ID SANs + sans_dns (list): List of DNS subject alternative names (similar to the arg: sans) + sans_ip (list): List of IP subject alternative names + additional_critical_extensions (list): List of critical additional extension objects. + Object must be a x509 ExtensionType. + + Returns: + bytes: CSR + """ + signing_key = serialization.load_pem_private_key(private_key, password=private_key_password) + subject_name = [x509.NameAttribute(x509.NameOID.COMMON_NAME, subject)] + if add_unique_id_to_subject_name: + unique_identifier = uuid.uuid4() + subject_name.append( + x509.NameAttribute(x509.NameOID.X500_UNIQUE_IDENTIFIER, str(unique_identifier)) + ) + if organization: + subject_name.append(x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, organization)) + if email_address: + subject_name.append(x509.NameAttribute(x509.NameOID.EMAIL_ADDRESS, email_address)) + if country_name: + subject_name.append(x509.NameAttribute(x509.NameOID.COUNTRY_NAME, country_name)) + if state_or_province_name: + subject_name.append( + x509.NameAttribute(x509.NameOID.STATE_OR_PROVINCE_NAME, state_or_province_name) + ) + if locality_name: + subject_name.append(x509.NameAttribute(x509.NameOID.LOCALITY_NAME, locality_name)) + csr = x509.CertificateSigningRequestBuilder(subject_name=x509.Name(subject_name)) + + _sans: List[x509.GeneralName] = [] + if sans_oid: + _sans.extend([x509.RegisteredID(x509.ObjectIdentifier(san)) for san in 
sans_oid]) + if sans_ip: + _sans.extend([x509.IPAddress(ipaddress.ip_address(san)) for san in sans_ip]) + if sans: + _sans.extend([x509.DNSName(san) for san in sans]) + if sans_dns: + _sans.extend([x509.DNSName(san) for san in sans_dns]) + if _sans: + csr = csr.add_extension(x509.SubjectAlternativeName(set(_sans)), critical=False) + + if additional_critical_extensions: + for extension in additional_critical_extensions: + csr = csr.add_extension(extension, critical=True) + + signed_certificate = csr.sign(signing_key, hashes.SHA256()) # type: ignore[arg-type] + return signed_certificate.public_bytes(serialization.Encoding.PEM) + + +def get_sha256_hex(data: str) -> str: + """Calculate the hash of the provided data and return the hexadecimal representation.""" + digest = hashes.Hash(hashes.SHA256()) + digest.update(data.encode()) + return digest.finalize().hex() + + +def csr_matches_certificate(csr: str, cert: str) -> bool: + """Check if a CSR matches a certificate. + + Args: + csr (str): Certificate Signing Request as a string + cert (str): Certificate as a string + Returns: + bool: True/False depending on whether the CSR matches the certificate. + """ + csr_object = x509.load_pem_x509_csr(csr.encode("utf-8")) + cert_object = x509.load_pem_x509_certificate(cert.encode("utf-8")) + + if csr_object.public_key().public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ) != cert_object.public_key().public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ): + return False + return True + + +def _relation_data_is_valid( + relation: Relation, app_or_unit: Union[Application, Unit], json_schema: dict +) -> bool: + """Check whether relation data is valid based on json schema. 
+ + Args: + relation (Relation): Relation object + app_or_unit (Union[Application, Unit]): Application or unit object + json_schema (dict): Json schema + + Returns: + bool: Whether relation data is valid. + """ + relation_data = _load_relation_data(relation.data[app_or_unit]) + try: + validate(instance=relation_data, schema=json_schema) + return True + except exceptions.ValidationError: + return False + + +class CertificatesProviderCharmEvents(CharmEvents): + """List of events that the TLS Certificates provider charm can leverage.""" + + certificate_creation_request = EventSource(CertificateCreationRequestEvent) + certificate_revocation_request = EventSource(CertificateRevocationRequestEvent) + + +class CertificatesRequirerCharmEvents(CharmEvents): + """List of events that the TLS Certificates requirer charm can leverage.""" + + certificate_available = EventSource(CertificateAvailableEvent) + certificate_expiring = EventSource(CertificateExpiringEvent) + certificate_invalidated = EventSource(CertificateInvalidatedEvent) + all_certificates_invalidated = EventSource(AllCertificatesInvalidatedEvent) + + +class TLSCertificatesProvidesV3(Object): + """TLS certificates provider class to be instantiated by TLS certificates providers.""" + + on = CertificatesProviderCharmEvents() # type: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relationship_name: str): + super().__init__(charm, relationship_name) + self.framework.observe( + charm.on[relationship_name].relation_changed, self._on_relation_changed + ) + self.charm = charm + self.relationship_name = relationship_name + + def _load_app_relation_data(self, relation: Relation) -> dict: + """Load relation data from the application relation data bag. + + Json loads all data. + + Args: + relation: Relation data from the application databag + + Returns: + dict: Relation data in dict format. + """ + # If unit is not leader, it does not try to reach relation data. 
+ if not self.model.unit.is_leader(): + return {} + return _load_relation_data(relation.data[self.charm.app]) + + def _add_certificate( + self, + relation_id: int, + certificate: str, + certificate_signing_request: str, + ca: str, + chain: List[str], + recommended_expiry_notification_time: Optional[int] = None, + ) -> None: + """Add certificate to relation data. + + Args: + relation_id (int): Relation id + certificate (str): Certificate + certificate_signing_request (str): Certificate Signing Request + ca (str): CA Certificate + chain (list): CA Chain + recommended_expiry_notification_time (int): + Time in hours before the certificate expires to notify the user. + + Returns: + None + """ + relation = self.model.get_relation( + relation_name=self.relationship_name, relation_id=relation_id + ) + if not relation: + raise RuntimeError( + f"Relation {self.relationship_name} does not exist - " + f"The certificate request can't be completed" + ) + new_certificate = { + "certificate": certificate, + "certificate_signing_request": certificate_signing_request, + "ca": ca, + "chain": chain, + "recommended_expiry_notification_time": recommended_expiry_notification_time, + } + provider_relation_data = self._load_app_relation_data(relation) + provider_certificates = provider_relation_data.get("certificates", []) + certificates = copy.deepcopy(provider_certificates) + if new_certificate in certificates: + logger.info("Certificate already in relation data - Doing nothing") + return + certificates.append(new_certificate) + relation.data[self.model.app]["certificates"] = json.dumps(certificates) + + def _remove_certificate( + self, + relation_id: int, + certificate: Optional[str] = None, + certificate_signing_request: Optional[str] = None, + ) -> None: + """Remove certificate from a given relation based on user provided certificate or csr. 
+ + Args: + relation_id (int): Relation id + certificate (str): Certificate (optional) + certificate_signing_request: Certificate signing request (optional) + + Returns: + None + """ + relation = self.model.get_relation( + relation_name=self.relationship_name, + relation_id=relation_id, + ) + if not relation: + raise RuntimeError( + f"Relation {self.relationship_name} with relation id {relation_id} does not exist" + ) + provider_relation_data = self._load_app_relation_data(relation) + provider_certificates = provider_relation_data.get("certificates", []) + certificates = copy.deepcopy(provider_certificates) + for certificate_dict in certificates: + if certificate and certificate_dict["certificate"] == certificate: + certificates.remove(certificate_dict) + if ( + certificate_signing_request + and certificate_dict["certificate_signing_request"] == certificate_signing_request + ): + certificates.remove(certificate_dict) + relation.data[self.model.app]["certificates"] = json.dumps(certificates) + + def revoke_all_certificates(self) -> None: + """Revoke all certificates of this provider. + + This method is meant to be used when the Root CA has changed. + """ + for relation in self.model.relations[self.relationship_name]: + provider_relation_data = self._load_app_relation_data(relation) + provider_certificates = copy.deepcopy(provider_relation_data.get("certificates", [])) + for certificate in provider_certificates: + certificate["revoked"] = True + relation.data[self.model.app]["certificates"] = json.dumps(provider_certificates) + + def set_relation_certificate( + self, + certificate: str, + certificate_signing_request: str, + ca: str, + chain: List[str], + relation_id: int, + recommended_expiry_notification_time: Optional[int] = None, + ) -> None: + """Add certificates to relation data. 
+ + Args: + certificate (str): Certificate + certificate_signing_request (str): Certificate signing request + ca (str): CA Certificate + chain (list): CA Chain + relation_id (int): Juju relation ID + recommended_expiry_notification_time (int): + Recommended time in hours before the certificate expires to notify the user. + + Returns: + None + """ + if not self.model.unit.is_leader(): + return + certificates_relation = self.model.get_relation( + relation_name=self.relationship_name, relation_id=relation_id + ) + if not certificates_relation: + raise RuntimeError(f"Relation {self.relationship_name} does not exist") + self._remove_certificate( + certificate_signing_request=certificate_signing_request.strip(), + relation_id=relation_id, + ) + self._add_certificate( + relation_id=relation_id, + certificate=certificate.strip(), + certificate_signing_request=certificate_signing_request.strip(), + ca=ca.strip(), + chain=[cert.strip() for cert in chain], + recommended_expiry_notification_time=recommended_expiry_notification_time, + ) + + def remove_certificate(self, certificate: str) -> None: + """Remove a given certificate from relation data. + + Args: + certificate (str): TLS Certificate + + Returns: + None + """ + certificates_relation = self.model.relations[self.relationship_name] + if not certificates_relation: + raise RuntimeError(f"Relation {self.relationship_name} does not exist") + for certificate_relation in certificates_relation: + self._remove_certificate(certificate=certificate, relation_id=certificate_relation.id) + + def get_issued_certificates( + self, relation_id: Optional[int] = None + ) -> List[ProviderCertificate]: + """Return a List of issued (non revoked) certificates. 
+ + Returns: + List: List of ProviderCertificate objects + """ + provider_certificates = self.get_provider_certificates(relation_id=relation_id) + return [certificate for certificate in provider_certificates if not certificate.revoked] + + def get_provider_certificates( + self, relation_id: Optional[int] = None + ) -> List[ProviderCertificate]: + """Return a List of issued certificates. + + Returns: + List: List of ProviderCertificate objects + """ + certificates: List[ProviderCertificate] = [] + relations = ( + [ + relation + for relation in self.model.relations[self.relationship_name] + if relation.id == relation_id + ] + if relation_id is not None + else self.model.relations.get(self.relationship_name, []) + ) + for relation in relations: + if not relation.app: + logger.warning("Relation %s does not have an application", relation.id) + continue + provider_relation_data = self._load_app_relation_data(relation) + provider_certificates = provider_relation_data.get("certificates", []) + for certificate in provider_certificates: + try: + certificate_object = x509.load_pem_x509_certificate( + data=certificate["certificate"].encode() + ) + except ValueError as e: + logger.error("Could not load certificate - Skipping: %s", e) + continue + provider_certificate = ProviderCertificate( + relation_id=relation.id, + application_name=relation.app.name, + csr=certificate["certificate_signing_request"], + certificate=certificate["certificate"], + ca=certificate["ca"], + chain=certificate["chain"], + revoked=certificate.get("revoked", False), + expiry_time=certificate_object.not_valid_after_utc, + expiry_notification_time=certificate.get( + "recommended_expiry_notification_time" + ), + ) + certificates.append(provider_certificate) + return certificates + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Handle relation changed event. 
+ + Looks at the relation data and either emits: + - certificate request event: If the unit relation data contains a CSR for which + a certificate does not exist in the provider relation data. + - certificate revocation event: If the provider relation data contains a CSR for which + a csr does not exist in the requirer relation data. + + Args: + event: Juju event + + Returns: + None + """ + if event.unit is None: + logger.error("Relation_changed event does not have a unit.") + return + if not self.model.unit.is_leader(): + return + if not _relation_data_is_valid(event.relation, event.unit, REQUIRER_JSON_SCHEMA): + logger.debug("Relation data did not pass JSON Schema validation") + return + provider_certificates = self.get_provider_certificates(relation_id=event.relation.id) + requirer_csrs = self.get_requirer_csrs(relation_id=event.relation.id) + provider_csrs = [ + certificate_creation_request.csr + for certificate_creation_request in provider_certificates + ] + for certificate_request in requirer_csrs: + if certificate_request.csr not in provider_csrs: + self.on.certificate_creation_request.emit( + certificate_signing_request=certificate_request.csr, + relation_id=certificate_request.relation_id, + is_ca=certificate_request.is_ca, + ) + self._revoke_certificates_for_which_no_csr_exists(relation_id=event.relation.id) + + def _revoke_certificates_for_which_no_csr_exists(self, relation_id: int) -> None: + """Revoke certificates for which no unit has a CSR. + + Goes through all generated certificates and compare against the list of CSRs for all units. 
+ + Returns: + None + """ + provider_certificates = self.get_provider_certificates(relation_id) + requirer_csrs = self.get_requirer_csrs(relation_id) + list_of_csrs = [csr.csr for csr in requirer_csrs] + for certificate in provider_certificates: + if certificate.csr not in list_of_csrs: + self.on.certificate_revocation_request.emit( + certificate=certificate.certificate, + certificate_signing_request=certificate.csr, + ca=certificate.ca, + chain=certificate.chain, + ) + self.remove_certificate(certificate=certificate.certificate) + + def get_outstanding_certificate_requests( + self, relation_id: Optional[int] = None + ) -> List[RequirerCSR]: + """Return CSR's for which no certificate has been issued. + + Args: + relation_id (int): Relation id + + Returns: + list: List of RequirerCSR objects. + """ + requirer_csrs = self.get_requirer_csrs(relation_id=relation_id) + outstanding_csrs: List[RequirerCSR] = [] + for relation_csr in requirer_csrs: + if not self.certificate_issued_for_csr( + app_name=relation_csr.application_name, + csr=relation_csr.csr, + relation_id=relation_id, + ): + outstanding_csrs.append(relation_csr) + return outstanding_csrs + + def get_requirer_csrs(self, relation_id: Optional[int] = None) -> List[RequirerCSR]: + """Return a list of requirers' CSRs. + + It returns CSRs from all relations if relation_id is not specified. + CSRs are returned per relation id, application name and unit name. 
+ + Returns: + list: List[RequirerCSR] + """ + relation_csrs: List[RequirerCSR] = [] + relations = ( + [ + relation + for relation in self.model.relations[self.relationship_name] + if relation.id == relation_id + ] + if relation_id is not None + else self.model.relations.get(self.relationship_name, []) + ) + + for relation in relations: + for unit in relation.units: + requirer_relation_data = _load_relation_data(relation.data[unit]) + unit_csrs_list = requirer_relation_data.get("certificate_signing_requests", []) + for unit_csr in unit_csrs_list: + csr = unit_csr.get("certificate_signing_request") + if not csr: + logger.warning("No CSR found in relation data - Skipping") + continue + ca = unit_csr.get("ca", False) + if not relation.app: + logger.warning("No remote app in relation - Skipping") + continue + relation_csr = RequirerCSR( + relation_id=relation.id, + application_name=relation.app.name, + unit_name=unit.name, + csr=csr, + is_ca=ca, + ) + relation_csrs.append(relation_csr) + return relation_csrs + + def certificate_issued_for_csr( + self, app_name: str, csr: str, relation_id: Optional[int] + ) -> bool: + """Check whether a certificate has been issued for a given CSR. + + Args: + app_name (str): Application name that the CSR belongs to. + csr (str): Certificate Signing Request. + relation_id (Optional[int]): Relation ID + + Returns: + bool: True/False depending on whether a certificate has been issued for the given CSR. 
+ """ + issued_certificates_per_csr = self.get_issued_certificates(relation_id=relation_id) + for issued_certificate in issued_certificates_per_csr: + if issued_certificate.csr == csr and issued_certificate.application_name == app_name: + return csr_matches_certificate(csr, issued_certificate.certificate) + return False + + +class TLSCertificatesRequiresV3(Object): + """TLS certificates requirer class to be instantiated by TLS certificates requirers.""" + + on = CertificatesRequirerCharmEvents() # type: ignore[reportAssignmentType] + + def __init__( + self, + charm: CharmBase, + relationship_name: str, + expiry_notification_time: Optional[int] = None, + ): + """Generate/use private key and observes relation changed event. + + Args: + charm: Charm object + relationship_name: Juju relation name + expiry_notification_time (int): Number of hours prior to certificate expiry. + Used to trigger the CertificateExpiring event. + This value is used as a recommendation only, + The actual value is calculated taking into account the provider's recommendation. + """ + super().__init__(charm, relationship_name) + if not JujuVersion.from_environ().has_secrets: + logger.warning("This version of the TLS library requires Juju secrets (Juju >= 3.0)") + self.relationship_name = relationship_name + self.charm = charm + self.expiry_notification_time = expiry_notification_time + self.framework.observe( + charm.on[relationship_name].relation_changed, self._on_relation_changed + ) + self.framework.observe( + charm.on[relationship_name].relation_broken, self._on_relation_broken + ) + self.framework.observe(charm.on.secret_expired, self._on_secret_expired) + + def get_requirer_csrs(self) -> List[RequirerCSR]: + """Return list of requirer's CSRs from relation unit data. + + Returns: + list: List of RequirerCSR objects. 
+ """ + relation = self.model.get_relation(self.relationship_name) + if not relation: + return [] + requirer_csrs = [] + requirer_relation_data = _load_relation_data(relation.data[self.model.unit]) + requirer_csrs_dict = requirer_relation_data.get("certificate_signing_requests", []) + for requirer_csr_dict in requirer_csrs_dict: + csr = requirer_csr_dict.get("certificate_signing_request") + if not csr: + logger.warning("No CSR found in relation data - Skipping") + continue + ca = requirer_csr_dict.get("ca", False) + relation_csr = RequirerCSR( + relation_id=relation.id, + application_name=self.model.app.name, + unit_name=self.model.unit.name, + csr=csr, + is_ca=ca, + ) + requirer_csrs.append(relation_csr) + return requirer_csrs + + def get_provider_certificates(self) -> List[ProviderCertificate]: + """Return list of certificates from the provider's relation data.""" + provider_certificates: List[ProviderCertificate] = [] + relation = self.model.get_relation(self.relationship_name) + if not relation: + logger.debug("No relation: %s", self.relationship_name) + return [] + if not relation.app: + logger.debug("No remote app in relation: %s", self.relationship_name) + return [] + provider_relation_data = _load_relation_data(relation.data[relation.app]) + provider_certificate_dicts = provider_relation_data.get("certificates", []) + for provider_certificate_dict in provider_certificate_dicts: + certificate = provider_certificate_dict.get("certificate") + if not certificate: + logger.warning("No certificate found in relation data - Skipping") + continue + try: + certificate_object = x509.load_pem_x509_certificate(data=certificate.encode()) + except ValueError as e: + logger.error("Could not load certificate - Skipping: %s", e) + continue + ca = provider_certificate_dict.get("ca") + chain = provider_certificate_dict.get("chain", []) + csr = provider_certificate_dict.get("certificate_signing_request") + recommended_expiry_notification_time = provider_certificate_dict.get( + 
"recommended_expiry_notification_time" + ) + expiry_time = certificate_object.not_valid_after_utc + validity_start_time = certificate_object.not_valid_before_utc + expiry_notification_time = calculate_expiry_notification_time( + validity_start_time=validity_start_time, + expiry_time=expiry_time, + provider_recommended_notification_time=recommended_expiry_notification_time, + requirer_recommended_notification_time=self.expiry_notification_time, + ) + if not csr: + logger.warning("No CSR found in relation data - Skipping") + continue + revoked = provider_certificate_dict.get("revoked", False) + provider_certificate = ProviderCertificate( + relation_id=relation.id, + application_name=relation.app.name, + csr=csr, + certificate=certificate, + ca=ca, + chain=chain, + revoked=revoked, + expiry_time=expiry_time, + expiry_notification_time=expiry_notification_time, + ) + provider_certificates.append(provider_certificate) + return provider_certificates + + def _add_requirer_csr_to_relation_data(self, csr: str, is_ca: bool) -> None: + """Add CSR to relation data. 
+ + Args: + csr (str): Certificate Signing Request + is_ca (bool): Whether the certificate is a CA certificate + + Returns: + None + """ + relation = self.model.get_relation(self.relationship_name) + if not relation: + raise RuntimeError( + f"Relation {self.relationship_name} does not exist - " + f"The certificate request can't be completed" + ) + for requirer_csr in self.get_requirer_csrs(): + if requirer_csr.csr == csr and requirer_csr.is_ca == is_ca: + logger.info("CSR already in relation data - Doing nothing") + return + new_csr_dict = { + "certificate_signing_request": csr, + "ca": is_ca, + } + requirer_relation_data = _load_relation_data(relation.data[self.model.unit]) + existing_relation_data = requirer_relation_data.get("certificate_signing_requests", []) + new_relation_data = copy.deepcopy(existing_relation_data) + new_relation_data.append(new_csr_dict) + relation.data[self.model.unit]["certificate_signing_requests"] = json.dumps( + new_relation_data + ) + + def _remove_requirer_csr_from_relation_data(self, csr: str) -> None: + """Remove CSR from relation data. 
+ + Args: + csr (str): Certificate signing request + + Returns: + None + """ + relation = self.model.get_relation(self.relationship_name) + if not relation: + raise RuntimeError( + f"Relation {self.relationship_name} does not exist - " + f"The certificate request can't be completed" + ) + if not self.get_requirer_csrs(): + logger.info("No CSRs in relation data - Doing nothing") + return + requirer_relation_data = _load_relation_data(relation.data[self.model.unit]) + existing_relation_data = requirer_relation_data.get("certificate_signing_requests", []) + new_relation_data = copy.deepcopy(existing_relation_data) + for requirer_csr in new_relation_data: + if requirer_csr["certificate_signing_request"] == csr: + new_relation_data.remove(requirer_csr) + relation.data[self.model.unit]["certificate_signing_requests"] = json.dumps( + new_relation_data + ) + + def request_certificate_creation( + self, certificate_signing_request: bytes, is_ca: bool = False + ) -> None: + """Request TLS certificate to provider charm. + + Args: + certificate_signing_request (bytes): Certificate Signing Request + is_ca (bool): Whether the certificate is a CA certificate + + Returns: + None + """ + relation = self.model.get_relation(self.relationship_name) + if not relation: + raise RuntimeError( + f"Relation {self.relationship_name} does not exist - " + f"The certificate request can't be completed" + ) + self._add_requirer_csr_to_relation_data( + certificate_signing_request.decode().strip(), is_ca=is_ca + ) + logger.info("Certificate request sent to provider") + + def request_certificate_revocation(self, certificate_signing_request: bytes) -> None: + """Remove CSR from relation data. + + The provider of this relation is then expected to remove certificates associated to this + CSR from the relation data as well and emit a request_certificate_revocation event for the + provider charm to interpret. 
+ + Args: + certificate_signing_request (bytes): Certificate Signing Request + + Returns: + None + """ + self._remove_requirer_csr_from_relation_data(certificate_signing_request.decode().strip()) + logger.info("Certificate revocation sent to provider") + + def request_certificate_renewal( + self, old_certificate_signing_request: bytes, new_certificate_signing_request: bytes + ) -> None: + """Renew certificate. + + Removes old CSR from relation data and adds new one. + + Args: + old_certificate_signing_request: Old CSR + new_certificate_signing_request: New CSR + + Returns: + None + """ + try: + self.request_certificate_revocation( + certificate_signing_request=old_certificate_signing_request + ) + except RuntimeError: + logger.warning("Certificate revocation failed.") + self.request_certificate_creation( + certificate_signing_request=new_certificate_signing_request + ) + logger.info("Certificate renewal request completed.") + + def get_assigned_certificates(self) -> List[ProviderCertificate]: + """Get a list of certificates that were assigned to this unit. + + Returns: + List: List[ProviderCertificate] + """ + assigned_certificates = [] + for requirer_csr in self.get_certificate_signing_requests(fulfilled_only=True): + if cert := self._find_certificate_in_relation_data(requirer_csr.csr): + assigned_certificates.append(cert) + return assigned_certificates + + def get_expiring_certificates(self) -> List[ProviderCertificate]: + """Get a list of certificates that were assigned to this unit that are expiring or expired. 
+ + Returns: + List: List[ProviderCertificate] + """ + expiring_certificates: List[ProviderCertificate] = [] + for requirer_csr in self.get_certificate_signing_requests(fulfilled_only=True): + if cert := self._find_certificate_in_relation_data(requirer_csr.csr): + if not cert.expiry_time or not cert.expiry_notification_time: + continue + if datetime.now(timezone.utc) > cert.expiry_notification_time: + expiring_certificates.append(cert) + return expiring_certificates + + def get_certificate_signing_requests( + self, + fulfilled_only: bool = False, + unfulfilled_only: bool = False, + ) -> List[RequirerCSR]: + """Get the list of CSR's that were sent to the provider. + + You can choose to get only the CSR's that have a certificate assigned or only the CSR's + that don't. + + Args: + fulfilled_only (bool): This option will discard CSRs that don't have certificates yet. + unfulfilled_only (bool): This option will discard CSRs that have certificates signed. + + Returns: + List of RequirerCSR objects. + """ + csrs = [] + for requirer_csr in self.get_requirer_csrs(): + cert = self._find_certificate_in_relation_data(requirer_csr.csr) + if (unfulfilled_only and cert) or (fulfilled_only and not cert): + continue + csrs.append(requirer_csr) + + return csrs + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Handle relation changed event. + + Goes through all providers certificates that match a requested CSR. + + If the provider certificate is revoked, emit a CertificateInvalidateEvent, + otherwise emit a CertificateAvailableEvent. + + Remove the secret for revoked certificate, or add a secret with the correct expiry + time for new certificates. 
+ + Args: + event: Juju event + + Returns: + None + """ + if not event.app: + logger.warning("No remote app in relation - Skipping") + return + if not _relation_data_is_valid(event.relation, event.app, PROVIDER_JSON_SCHEMA): + logger.debug("Relation data did not pass JSON Schema validation") + return + provider_certificates = self.get_provider_certificates() + requirer_csrs = [ + certificate_creation_request.csr + for certificate_creation_request in self.get_requirer_csrs() + ] + for certificate in provider_certificates: + if certificate.csr in requirer_csrs: + csr_in_sha256_hex = get_sha256_hex(certificate.csr) + if certificate.revoked: + with suppress(SecretNotFoundError): + logger.debug( + "Removing secret with label %s", + f"{LIBID}-{csr_in_sha256_hex}", + ) + secret = self.model.get_secret(label=f"{LIBID}-{csr_in_sha256_hex}") + secret.remove_all_revisions() + self.on.certificate_invalidated.emit( + reason="revoked", + certificate=certificate.certificate, + certificate_signing_request=certificate.csr, + ca=certificate.ca, + chain=certificate.chain, + ) + else: + try: + logger.debug( + "Setting secret with label %s", f"{LIBID}-{csr_in_sha256_hex}" + ) + secret = self.model.get_secret(label=f"{LIBID}-{csr_in_sha256_hex}") + secret.set_content( + {"certificate": certificate.certificate, "csr": certificate.csr} + ) + secret.set_info( + expire=self._get_next_secret_expiry_time(certificate), + ) + except SecretNotFoundError: + logger.debug( + "Creating new secret with label %s", f"{LIBID}-{csr_in_sha256_hex}" + ) + secret = self.charm.unit.add_secret( + {"certificate": certificate.certificate, "csr": certificate.csr}, + label=f"{LIBID}-{csr_in_sha256_hex}", + expire=self._get_next_secret_expiry_time(certificate), + ) + self.on.certificate_available.emit( + certificate_signing_request=certificate.csr, + certificate=certificate.certificate, + ca=certificate.ca, + chain=certificate.chain, + ) + + def _get_next_secret_expiry_time(self, certificate: ProviderCertificate) 
-> Optional[datetime]: + """Return the expiry time or expiry notification time. + + Extracts the expiry time from the provided certificate, calculates the + expiry notification time and return the closest of the two, that is in + the future. + + Args: + certificate: ProviderCertificate object + + Returns: + Optional[datetime]: None if the certificate expiry time cannot be read, + next expiry time otherwise. + """ + if not certificate.expiry_time or not certificate.expiry_notification_time: + return None + return _get_closest_future_time( + certificate.expiry_notification_time, + certificate.expiry_time, + ) + + def _on_relation_broken(self, event: RelationBrokenEvent) -> None: + """Handle Relation Broken Event. + + Emitting `all_certificates_invalidated` from `relation-broken` rather + than `relation-departed` since certs are stored in app data. + + Args: + event: Juju event + + Returns: + None + """ + self.on.all_certificates_invalidated.emit() + + def _on_secret_expired(self, event: SecretExpiredEvent) -> None: + """Handle Secret Expired Event. + + Loads the certificate from the secret, and will emit 1 of 2 + events. + + If the certificate is not yet expired, emits CertificateExpiringEvent + and updates the expiry time of the secret to the exact expiry time on + the certificate. + + If the certificate is expired, emits CertificateInvalidedEvent and + deletes the secret. + + Args: + event (SecretExpiredEvent): Juju event + """ + if not event.secret.label or not event.secret.label.startswith(f"{LIBID}-"): + return + csr = event.secret.get_content()["csr"] + provider_certificate = self._find_certificate_in_relation_data(csr) + if not provider_certificate: + # A secret expired but we did not find matching certificate. Cleaning up + event.secret.remove_all_revisions() + return + + if not provider_certificate.expiry_time: + # A secret expired but matching certificate is invalid. 
Cleaning up + event.secret.remove_all_revisions() + return + + if datetime.now(timezone.utc) < provider_certificate.expiry_time: + logger.warning("Certificate almost expired") + self.on.certificate_expiring.emit( + certificate=provider_certificate.certificate, + expiry=provider_certificate.expiry_time.isoformat(), + ) + event.secret.set_info( + expire=provider_certificate.expiry_time, + ) + else: + logger.warning("Certificate is expired") + self.on.certificate_invalidated.emit( + reason="expired", + certificate=provider_certificate.certificate, + certificate_signing_request=provider_certificate.csr, + ca=provider_certificate.ca, + chain=provider_certificate.chain, + ) + self.request_certificate_revocation(provider_certificate.certificate.encode()) + event.secret.remove_all_revisions() + + def _find_certificate_in_relation_data(self, csr: str) -> Optional[ProviderCertificate]: + """Return the certificate that match the given CSR.""" + for provider_certificate in self.get_provider_certificates(): + if provider_certificate.csr != csr: + continue + return provider_certificate + return None diff --git a/poetry.lock b/poetry.lock index d81fb76a..8091203b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -151,13 +151,13 @@ typecheck = ["mypy"] [[package]] name = "cachetools" -version = "5.4.0" +version = "5.5.0" description = "Extensible memoizing collections and decorators" optional = false python-versions = ">=3.7" files = [ - {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, - {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, + {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, + {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, ] [[package]] @@ -610,13 +610,13 @@ tests = ["asttokens 
(>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth [[package]] name = "google-auth" -version = "2.33.0" +version = "2.34.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google_auth-2.33.0-py2.py3-none-any.whl", hash = "sha256:8eff47d0d4a34ab6265c50a106a3362de6a9975bb08998700e389f857e4d39df"}, - {file = "google_auth-2.33.0.tar.gz", hash = "sha256:d6a52342160d7290e334b4d47ba390767e4438ad0d45b7630774533e82655b95"}, + {file = "google_auth-2.34.0-py2.py3-none-any.whl", hash = "sha256:72fd4733b80b6d777dcde515628a9eb4a577339437012874ea286bca7261ee65"}, + {file = "google_auth-2.34.0.tar.gz", hash = "sha256:8eb87396435c19b20d32abd2f984e31c191a15284af72eb922f10e5bde9c04cc"}, ] [package.dependencies] @@ -626,7 +626,7 @@ rsa = ">=3.1.4,<5" [package.extras] aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] -enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +enterprise-cert = ["cryptography", "pyopenssl"] pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] @@ -2139,4 +2139,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "c7cb79450373864dd99bf571c2f6489124755b12d1b8f04cd9a8241326fa7d49" +content-hash = "40d08e01f1ad3128ac6f814a327a24feccf1a14232d68aa53fa689e5612799a9" diff --git a/pyproject.toml b/pyproject.toml index ceffc1af..cc22858c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,6 +45,7 @@ pytest = "^8.1.1" parameterized = "^0.9.0" [tool.poetry.group.integration.dependencies] +ops = "^2.15.0" allure-pytest = "^2.13.5" tenacity = "^8.2.3" pymongo = "^4.7.3" diff --git a/src/charm.py b/src/charm.py index c91b4236..57c15413 100755 --- a/src/charm.py +++ b/src/charm.py @@ -3,12 +3,24 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
+from ops.main import main import json from exceptions import MissingSecretError -from ops.pebble import PathError, ProtocolError -from typing import Set, Optional, Dict +from ops.pebble import PathError, ProtocolError, Layer from node_port import NodePortManager + +from typing import Set, Optional, Dict +from charms.mongodb.v0.config_server_interface import ClusterRequirer +from tenacity import ( + Retrying, + retry, + stop_after_attempt, + wait_fixed, +) + + from charms.mongos.v0.set_status import MongosStatusHandler +from charms.mongodb.v0.mongodb_tls import MongoDBTLS from charms.mongodb.v0.mongodb_secrets import SecretCache from charms.mongodb.v0.mongodb_secrets import generate_secret_label from charms.mongodb.v1.mongos import MongosConfiguration, MongosConnection @@ -16,11 +28,13 @@ MongoDBUser, ) +from charms.mongodb.v1.helpers import get_mongos_args + from config import Config import ops from ops.model import BlockedStatus, Container, Relation, ActiveStatus, Unit -from ops.charm import StartEvent, RelationDepartedEvent +from ops.charm import StartEvent, RelationDepartedEvent, ConfigChangedEvent import logging @@ -43,7 +57,7 @@ class MissingConfigServerError(Exception): """Raised when mongos expects to be connected to a config-server but is not.""" -class ExtraDataDirError: +class ExtraDataDirError(Exception): """Raised when there is unexpected data in the data directory.""" @@ -52,23 +66,41 @@ class MongosCharm(ops.CharmBase): def __init__(self, *args): super().__init__(*args) - self.framework.observe( - self.on.mongos_pebble_ready, self._on_mongos_pebble_ready - ) + self.framework.observe(self.on.mongos_pebble_ready, self._on_mongos_pebble_ready) self.framework.observe(self.on.start, self._on_start) self.framework.observe(self.on.update_status, self._on_update_status) + self.tls = MongoDBTLS(self, Config.Relations.PEERS, substrate=Config.SUBSTRATE) self.role = Config.Role.MONGOS self.secrets = SecretCache(self) self.status = MongosStatusHandler(self) + 
self.cluster = ClusterRequirer(self, substrate=Config.SUBSTRATE) self.node_port_manager = NodePortManager(self) # BEGIN: hook functions + def _on_config_changed(self, event: ConfigChangedEvent) -> None: + """Listen to changes in the application configuration.""" + if self.expose_external not in Config.ExternalConnections.VALID_EXTERNAL_CONFIG: + logger.error( + "External configuration: %s for expose-external is not valid, should be one of: %s", + self.expose_external, + Config.ExternalConnections.VALID_EXTERNAL_CONFIG, + ) + self.unit.status = Config.Status.INVALID_EXTERNAL_CONFIG + + if self.expose_external == Config.ExternalConnections.EXTERNAL_NODEPORT: + self.update_external_services() + + if self.expose_external == Config.ExternalConnections.NONE: + # TODO future PR - support revoking external access + pass + + # TODO DPE-5235 support updating data-integrator clients to have/not have public IP + def _on_mongos_pebble_ready(self, event) -> None: """Configure MongoDB pebble layer specification.""" # any external services must be created before setting of properties - self.update_external_services() if not self.is_integrated_to_config_server(): logger.info( @@ -84,7 +116,7 @@ def _on_mongos_pebble_ready(self, event) -> None: return try: - # mongod needs keyFile and TLS certificates on filesystem + # mongos needs keyFile and TLS certificates on filesystem self._push_keyfile_to_workload(container) self._pull_licenses(container) self._set_data_dir_permissions(container) @@ -105,12 +137,18 @@ def _on_start(self, event: StartEvent) -> None: # start hooks are fired before relation hooks and `mongos` requires a config-server in # order to start. 
Wait to receive config-server info from the relation event before # starting `mongos` daemon - self.status.set_and_share_status( - BlockedStatus("Missing relation to config-server.") - ) + self.status.set_and_share_status(BlockedStatus("Missing relation to config-server.")) def _on_update_status(self, _): """Handle the update status event""" + if self.expose_external not in Config.ExternalConnections.VALID_EXTERNAL_CONFIG: + logger.error( + "External configuration: %s for expose-external is not valid, should be one of: %s", + self.expose_external, + Config.ExternalConnections.VALID_EXTERNAL_CONFIG, + ) + self.unit.status = Config.Status.INVALID_EXTERNAL_CONFIG + if self.unit.status == Config.Status.UNHEALTHY_UPGRADE: return @@ -118,9 +156,7 @@ def _on_update_status(self, _): logger.info( "Missing integration to config-server. mongos cannot run unless connected to config-server." ) - self.status.set_and_share_status( - BlockedStatus("Missing relation to config-server.") - ) + self.status.set_and_share_status(BlockedStatus("Missing relation to config-server.")) return self.status.set_and_share_status(ActiveStatus()) @@ -130,22 +166,29 @@ def _on_update_status(self, _): # BEGIN: helper functions def update_external_services(self) -> None: """Attempts to update any external Kubernetes services.""" - if not self.is_external_client: - return - # every unit attempts to create a nodeport service # if exists, will silently continue self.node_port_manager.apply_service( - service=self.node_port_manager.build_node_port_services( - port=Config.MONGOS_PORT - ) + service=self.node_port_manager.build_node_port_services(port=Config.MONGOS_PORT) ) + def get_keyfile_contents(self) -> str | None: + """Retrieves the contents of the keyfile on host machine.""" + # wait for keyFile to be created by leader unit + if not self.get_secret(APP_SCOPE, Config.Secrets.SECRET_KEYFILE_NAME): + logger.debug("waiting to receive keyfile contents from config-server.") + + try: + container = 
self.unit.get_container(Config.CONTAINER_NAME) + key = container.pull(f"{Config.MONGOD_CONF_DIR}/{Config.TLS.KEY_FILE_NAME}") + return key.read() + except PathError: + logger.info("no keyfile present") + return None + def is_integrated_to_config_server(self) -> bool: """Returns True if the mongos application is integrated to a config-server.""" - return ( - self.model.get_relation(Config.Relations.CLUSTER_RELATIONS_NAME) is not None - ) + return self.model.get_relation(Config.Relations.CLUSTER_RELATIONS_NAME) is not None def _get_mongos_config_for_user( self, user: MongoDBUser, hosts: Set[str] @@ -201,24 +244,30 @@ def remove_secret(self, scope, key) -> None: content = secret.get_content() if not content.get(key) or content[key] == Config.Secrets.SECRET_DELETED_LABEL: - logger.error( - f"Non-existing secret {scope}:{key} was attempted to be removed." - ) + logger.error(f"Non-existing secret {scope}:{key} was attempted to be removed.") return content[key] = Config.Secrets.SECRET_DELETED_LABEL secret.set_content(content) + @retry( + stop=stop_after_attempt(3), + wait=wait_fixed(2), + reraise=True, + ) + def stop_mongos_service(self): + """Stop mongos service.""" + container = self.unit.get_container(Config.CONTAINER_NAME) + container.stop(Config.SERVICE_NAME) + def restart_charm_services(self): """Restart mongos service.""" container = self.unit.get_container(Config.CONTAINER_NAME) container.stop(Config.SERVICE_NAME) - container.add_layer(Config.CONTAINER_NAME, self._mongod_layer, combine=True) - container.replan() - - self._connect_mongodb_exporter() - self._connect_pbm_agent() + for _ in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(2), reraise=True): + container.add_layer(Config.CONTAINER_NAME, self._mongos_layer, combine=True) + container.replan() def set_database(self, database: str) -> None: """Updates the database requested for the mongos user.""" @@ -228,9 +277,7 @@ def set_database(self, database: str) -> None: return # a mongos shard can only be 
related to one config server - config_server_rel = self.model.relations[ - Config.Relations.CLUSTER_RELATIONS_NAME - ][0] + config_server_rel = self.model.relations[Config.Relations.CLUSTER_RELATIONS_NAME][0] self.cluster.database_requires.update_relation_data( config_server_rel.id, {DATABASE_TAG: database} ) @@ -291,7 +338,19 @@ def get_mongos_host(self) -> str: The host for mongos can be either the Unix Domain Socket or an IP address depending on how the client wishes to connect to mongos (inside Juju or outside). """ - return self.unit_host + return self.unit_host(self.unit) + + def get_mongos_hosts(self) -> Set: + """Returns the host for mongos as a str. + + The host for mongos can be either the Unix Domain Socket or an IP address depending on how + the client wishes to connect to mongos (inside Juju or outside). + """ + hosts = {self.unit_host(self.unit)} + for unit in self.peers_units: + hosts.add(self.unit_host(unit)) + + return hosts @staticmethod def _generate_relation_departed_key(rel_id: int) -> str: @@ -335,9 +394,7 @@ def _pull_licenses(container: Container) -> None: for license_name in licenses: try: - license_file = container.pull( - path=Config.get_license_path(license_name) - ) + license_file = container.pull(path=Config.get_license_path(license_name)) f = open(f"LICENSE_{license_name}", "x") f.write(str(license_file.read())) f.close() @@ -351,17 +408,13 @@ def _set_data_dir_permissions(container: Container) -> None: Until the ability to set fsGroup and fsGroupChangePolicy via Pod securityContext is available, we fix permissions incorrectly with chown. 
""" - for path in [Config.DATA_DIR, Config.LOG_DIR, Config.LogRotate.LOG_STATUS_DIR]: + for path in [Config.DATA_DIR]: paths = container.list_files(path, itself=True) if not len(paths) == 1: - raise ExtraDataDirError( - "list_files doesn't return only the directory itself" - ) + raise ExtraDataDirError("list_files doesn't return only the directory itself") logger.debug(f"Data directory ownership: {paths[0].user}:{paths[0].group}") if paths[0].user != Config.UNIX_USER or paths[0].group != Config.UNIX_GROUP: - container.exec( - f"chown {Config.UNIX_USER}:{Config.UNIX_GROUP} -R {path}".split() - ) + container.exec(f"chown {Config.UNIX_USER}:{Config.UNIX_GROUP} -R {path}".split()) def push_file_to_unit( self, @@ -396,6 +449,51 @@ def unit_host(self, unit: Unit) -> str: # END: helper functions # BEGIN: properties + @property + def expose_external(self) -> Optional[str]: + """Returns mode of exposure for external connections.""" + + if self.app_peer_data.get("expose-external") == "none": + return + + return self.app_peer_data.get("expose-external") + + @property + def peers_units(self) -> list[Unit]: + """Get peers units in a safe way.""" + if not self._peers: + return [] + else: + return self._peers.units + + @property + def _mongos_layer(self) -> Layer: + """Returns a Pebble configuration layer for mongos.""" + if not (get_config_server_uri := self.cluster.get_config_server_uri()): + logger.error("cannot start mongos without a config_server_db") + raise MissingConfigServerError() + + layer_config = { + "summary": "mongos layer", + "description": "Pebble config layer for mongos router", + "services": { + "mongos": { + "override": "replace", + "summary": "mongos", + "command": "mongos " + + get_mongos_args( + self.mongos_config, + snap_install=False, + config_server_db=get_config_server_uri, + ), + "startup": "enabled", + "user": Config.UNIX_USER, + "group": Config.UNIX_GROUP, + } + }, + } + return Layer(layer_config) # type: ignore + @property def 
mongos_initialised(self) -> bool: """Check if mongos is initialised.""" @@ -415,46 +513,36 @@ def _unit_ip(self) -> str: return str(self.model.get_binding(Config.Relations.PEERS).network.bind_address) @property - def is_external_client(self) -> Optional[str]: + def is_external_client(self) -> bool: """Returns the connectivity mode which mongos should use. Note that for K8s routers this should always default to True. However we still include this function so that we can have parity on properties with the K8s and VM routers. """ - return True + return self.expose_external == Config.Ex @property def database(self) -> Optional[str]: - """Returns a mapping of databases requested by integrated clients. + """Returns a database to be used by mongos admin user. - TODO: Future PR. This should be modified to work for many clients. + TODO: Future PR. There should be a separate function with a mapping of databases for the + associated clients. """ - if not self._peers: - logger.info("Peer relation not joined yet.") - # TODO future PR implement relation interface between host application mongos and use - # host application name in generation of db name. - return "mongos-database" - - return self.app_peer_data.get(DATABASE_TAG, "mongos-database") + return f"{self.app.name}_{self.model.name}" @property def extra_user_roles(self) -> Set[str]: - """Returns a mapping of user roles requested by integrated clients. + """Returns the user roles of the mongos charm. - TODO: Future PR. This should be modified to work for many clients. + TODO: Future PR. There should be a separate function with a mapping of roles for the + associated clients. 
""" - if not self._peers: - logger.info("Peer relation not joined yet.") - return None - - return self.app_peer_data.get(USER_ROLES_TAG, "default") + return Config.USER_ROLE_CREATE_USERS @property def mongos_config(self) -> MongosConfiguration: """Generates a MongoDBConfiguration object for mongos in the deployment of MongoDB.""" - hosts = [self.get_mongos_host()] - # TODO: Future PR. Ensure that this works for external connections with NodePort - port = Config.MONGOS_PORT if self.is_external_client else None + hosts = self.get_mongos_hosts() external_ca, _ = self.tls.get_tls_files(internal=False) internal_ca, _ = self.tls.get_tls_files(internal=True) @@ -463,7 +551,8 @@ def mongos_config(self) -> MongosConfiguration: username=self.get_secret(APP_SCOPE, Config.Secrets.USERNAME), password=self.get_secret(APP_SCOPE, Config.Secrets.PASSWORD), hosts=hosts, - port=port, + # unlike the vm mongos charm, the K8s charm does not communicate with the unix socket + port=Config.MONGOS_PORT, roles=self.extra_user_roles, tls_external=external_ca is not None, tls_internal=internal_ca is not None, @@ -502,8 +591,20 @@ def upgrade_in_progress(self) -> bool: """ return False + @property + def config_server_db(self) -> str: + """Fetch current the config server database that this unit is connected to.""" + if not ( + config_server_relation := self.model.get_relation( + Config.Relations.CLUSTER_RELATIONS_NAME + ) + ): + return "" + + return config_server_relation.app.name + # END: properties if __name__ == "__main__": - ops.main(MongosCharm) + main(MongosCharm) diff --git a/src/config.py b/src/config.py index 9ac7aa19..aa5eae34 100644 --- a/src/config.py +++ b/src/config.py @@ -13,6 +13,20 @@ class Config: MONGODB_PORT = 27017 SUBSTRATE = "k8s" CONTAINER_NAME = "mongos" + USER_ROLE_CREATE_USERS = "admin" + SERVICE_NAME = "mongod" # this must match the name of the service in the ROCK + MONGOD_CONF_DIR = "/etc/mongod" + UNIX_USER = "mongodb" + UNIX_GROUP = "mongodb" + LICENSE_PATH = 
"/licenses/LICENSE" + DATA_DIR = "/var/lib/mongodb" + + class ExternalConnections: + """External Connections related config for MongoDB Charm.""" + + NONE = "none" + EXTERNAL_NODEPORT = "nodeport" + VALID_EXTERNAL_CONFIG = [NONE, EXTERNAL_NODEPORT] class Relations: """Relations related config for MongoDB Charm.""" @@ -61,6 +75,13 @@ class Status: # TODO Future PR add more status messages here as constants UNHEALTHY_UPGRADE = BlockedStatus("Unhealthy after upgrade.") + INVALID_EXTERNAL_CONFIG = BlockedStatus("Config option for expose-external not valid.") + + class Substrate: + """Substrate related constants.""" + + VM = "vm" + K8S = "k8s" class Role: """Role config names for MongoDB Charm.""" diff --git a/tests/integration/client_relations/__init__.py b/tests/integration/client_relations/__init__.py new file mode 100644 index 00000000..db3bfe1a --- /dev/null +++ b/tests/integration/client_relations/__init__.py @@ -0,0 +1,2 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. diff --git a/tests/integration/client_relations/helpers.py b/tests/integration/client_relations/helpers.py new file mode 100644 index 00000000..365df956 --- /dev/null +++ b/tests/integration/client_relations/helpers.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import json +import subprocess +import logging + +from typing import Any, Dict, List, Optional, Tuple + +from pathlib import Path +import yaml +from pymongo import MongoClient + +from dateutil.parser import parse +from pytest_operator.plugin import OpsTest +from tenacity import Retrying, stop_after_delay, wait_fixed +from tenacity import ( + RetryError, +) + +logger = logging.getLogger(__name__) + +PORT_MAPPING_INDEX = 4 + + +def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> None: + node_port_cmd = ( + f"kubectl get svc -n {ops_test.model.name} | grep NodePort | grep {node_port_name}" + ) + result = subprocess.run(node_port_cmd, shell=True, capture_output=True, text=True) + if result.returncode: + logger.info("was not able to find nodeport") + assert False, f"Command: {node_port_cmd} to find node port failed." + + assert ( + len(result.stdout.splitlines()) > 0 + ), "No port information available for expected service" + + # port information is available at PORT_MAPPING_INDEX + port_mapping = result.stdout.split()[PORT_MAPPING_INDEX] + + # port information is of the form 27018:30259/TCP + return port_mapping.split(":")[1].split("/")[0] + + +def assert_node_port_available(ops_test: OpsTest, node_port_name: str) -> None: + assert get_port_from_node_port( + ops_test, node_port_name + ), "No port information for expected service" + + +def get_public_k8s_ip() -> str: + result = subprocess.run("kubectl get nodes", shell=True, capture_output=True, text=True) + + if result.returncode: + logger.info("failed to retrieve public facing k8s IP") + assert False, "failed to retrieve public facing k8s IP" + + # port information is the first item of the last line + port_mapping = result.stdout.splitlines()[-1].split()[0] + + # port mapping is of the form ip-172-31-18-133 + return port_mapping.split("ip-")[1].replace("-", ".") diff --git a/tests/integration/client_relations/test_external_client_relations.py 
b/tests/integration/client_relations/test_external_client_relations.py new file mode 100644 index 00000000..bba58e43 --- /dev/null +++ b/tests/integration/client_relations/test_external_client_relations.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + + +import pytest +from pytest_operator.plugin import OpsTest + +from ..helpers import ( + MONGOS_APP_NAME, + MongoClient, + build_cluster, + deploy_cluster_components, + get_mongos_user_password, + MongoClient, +) + +from .helpers import ( + assert_node_port_available, + get_port_from_node_port, + get_public_k8s_ip, +) + +TEST_USER_NAME = "TestUserName1" +TEST_USER_PWD = "Test123" +TEST_DB_NAME = "my-test-db" + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test: OpsTest): + """Build and deploy a sharded cluster.""" + await deploy_cluster_components(ops_test) + await build_cluster(ops_test) + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_mongos_external_connections(ops_test: OpsTest) -> None: + """Tests that mongos is accessible externally.""" + configuration_parameters = {"expose-external": "nodeport"} + + # apply new configuration options + await ops_test.model.applications[MONGOS_APP_NAME].set_config(configuration_parameters) + for unit_id in range(len(ops_test.model.applications[MONGOS_APP_NAME].units)): + assert_node_port_available( + ops_test, node_port_name=f"{MONGOS_APP_NAME}-{unit_id}-external" + ) + + exposed_node_port = get_port_from_node_port(ops_test, node_port_name="mongos-k8s-nodeport") + public_k8s_ip = get_public_k8s_ip() + username, password = await get_mongos_user_password(ops_test, MONGOS_APP_NAME) + external_mongos_client = MongoClient( + f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}" + ) + external_mongos_client.admin.command("usersInfo") + external_mongos_client.close() diff --git a/tests/integration/helpers.py 
b/tests/integration/helpers.py index 2980e58a..990cc400 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -6,15 +6,33 @@ import subprocess import logging -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Tuple + +from pathlib import Path +import yaml +from pymongo import MongoClient + from dateutil.parser import parse from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_delay, wait_fixed +from tenacity import ( + RetryError, +) logger = logging.getLogger(__name__) PORT_MAPPING_INDEX = 4 -MONGOS_APP_NAME = "mongos" + +MONGOS_APP_NAME = "mongos-k8s" +MONGODB_CHARM_NAME = "mongodb-k8s" +CONFIG_SERVER_APP_NAME = "config-server" +SHARD_APP_NAME = "shard0" +MONGOS_PORT = 27018 +SHARD_REL_NAME = "sharding" +CONFIG_SERVER_REL_NAME = "config-server" +CLUSTER_REL_NAME = "cluster" + +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) class Status: @@ -139,50 +157,217 @@ async def wait_for_mongos_units_blocked( try: old_interval = (await ops_test.model.get_config())[hook_interval_key] await ops_test.model.set_config({hook_interval_key: "1m"}) - for attempt in Retrying( - stop=stop_after_delay(timeout), wait=wait_fixed(1), reraise=True - ): + for attempt in Retrying(stop=stop_after_delay(timeout), wait=wait_fixed(1), reraise=True): with attempt: await check_all_units_blocked_with_status(ops_test, db_app_name, status) finally: await ops_test.model.set_config({hook_interval_key: old_interval}) -def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> None: - node_port_cmd = f"kubectl get svc -n {ops_test.model.name} | grep NodePort | grep {node_port_name}" - result = subprocess.run(node_port_cmd, shell=True, capture_output=True, text=True) - if result.returncode: - logger.info("was not able to find nodeport") - assert False, f"Command: {node_port_cmd} to find node port failed." 
+async def deploy_cluster_components(ops_test: OpsTest) -> None: + """Deploys all cluster components and waits for idle.""" + mongos_charm = await ops_test.build_charm(".") + resources = {"mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"]} + await ops_test.model.deploy( + mongos_charm, + resources=resources, + application_name=MONGOS_APP_NAME, + series="jammy", + ) + + await ops_test.model.deploy( + MONGODB_CHARM_NAME, + application_name=CONFIG_SERVER_APP_NAME, + channel="6/edge", + config={"role": "config-server"}, + ) + await ops_test.model.deploy( + MONGODB_CHARM_NAME, + application_name=SHARD_APP_NAME, + channel="6/edge", + config={"role": "shard"}, + ) + + await ops_test.model.wait_for_idle( + apps=[MONGOS_APP_NAME, SHARD_APP_NAME, CONFIG_SERVER_APP_NAME], + idle_period=10, + raise_on_blocked=False, + raise_on_error=False, # Removed this once DPE-4996 is resolved. + ) + + +async def build_cluster(ops_test: OpsTest) -> None: + """Builds the cluster by integrating the components.""" + # prepare sharded cluster + await ops_test.model.wait_for_idle( + apps=[CONFIG_SERVER_APP_NAME, SHARD_APP_NAME], + idle_period=10, + raise_on_blocked=False, + raise_on_error=False, # Removed this once DPE-4996 is resolved. 
+ ) + await ops_test.model.integrate( + f"{SHARD_APP_NAME}:{SHARD_REL_NAME}", + f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}", + ) + await ops_test.model.wait_for_idle( + apps=[CONFIG_SERVER_APP_NAME, SHARD_APP_NAME], + idle_period=20, + raise_on_blocked=False, + raise_on_error=False, # https://github.com/canonical/mongodb-k8s-operator/issues/301 + ) - assert ( - len(result.stdout.splitlines()) > 0 - ), "No port information available for expected service" + # connect sharded cluster to mongos + await ops_test.model.integrate( + f"{MONGOS_APP_NAME}:{CLUSTER_REL_NAME}", + f"{CONFIG_SERVER_APP_NAME}:{CLUSTER_REL_NAME}", + ) + await ops_test.model.wait_for_idle( + apps=[CONFIG_SERVER_APP_NAME, SHARD_APP_NAME, MONGOS_APP_NAME], + idle_period=20, + status="active", + raise_on_error=False, # Removed this once DPE-4996 is resolved. + ) - # port information is available at PORT_MAPPING_INDEX - port_mapping = result.stdout.split()[PORT_MAPPING_INDEX] - # port information is of the form 27018:30259/TCP - return port_mapping.split(":")[1].split("/")[0] +async def get_application_name(ops_test: OpsTest, application_name: str) -> str: + """Returns the Application in the juju model that matches the provided application name. + This enables us to retrieve the name of the deployed application in an existing model, while + ignoring some test specific applications. + Note: if multiple applications with the application name exist, the first one found will be + returned. 
+ """ + status = await ops_test.model.get_status() + + for application in ops_test.model.applications: + # note that format of the charm field is not exactly "mongodb" but instead takes the form + # of `local:focal/mongodb-6` + if application_name in status["applications"][application]["charm"]: + return application + + return None + + +async def get_address_of_unit( + ops_test: OpsTest, unit_id: int, app_name: str = MONGOS_APP_NAME +) -> str: + """Retrieves the address of the unit based on provided id.""" + status = await ops_test.model.get_status() + return status["applications"][app_name]["units"][f"{app_name}/{unit_id}"]["address"] + + +async def get_secret_data(ops_test, secret_uri) -> Dict: + """Returns secret relation data.""" + secret_unique_id = secret_uri.split("/")[-1] + complete_command = f"show-secret {secret_uri} --reveal --format=json" + _, stdout, _ = await ops_test.juju(*complete_command.split()) + return json.loads(stdout)[secret_unique_id]["content"]["Data"] + + +async def get_application_relation_data( + ops_test: OpsTest, + application_name: str, + relation_name: str, + key: str, + relation_id: str = None, + relation_alias: str = None, +) -> Optional[str]: + """Get relation data for an application. + + Args: + ops_test: The ops test framework instance + application_name: The name of the application + relation_name: name of the relation to get connection data from + key: key of data to be retrieved + relation_id: id of the relation to get connection data from + relation_alias: alias of the relation (like a connection name) + to get connection data from + Returns: + the that that was requested or None + if no data in the relation + Raises: + ValueError if it's not possible to get application unit data + or if there is no data for the particular relation endpoint + and/or alias. 
+ """ + unit = ops_test.model.applications[application_name].units[0] + raw_data = (await ops_test.juju("show-unit", unit.name))[1] + if not raw_data: + raise ValueError(f"no unit info could be grabbed for { unit.name}") + data = yaml.safe_load(raw_data) + # Filter the data based on the relation name. + relation_data = [v for v in data[unit.name]["relation-info"] if v["endpoint"] == relation_name] + + if relation_id: + # Filter the data based on the relation id. + relation_data = [v for v in relation_data if v["relation-id"] == relation_id] + + if relation_alias: + # Filter the data based on the cluster/relation alias. + relation_data = [ + v + for v in relation_data + if json.loads(v["application-data"]["data"])["alias"] == relation_alias + ] + + if len(relation_data) == 0: + raise ValueError( + f"no relation data could be grabbed on relation with endpoint {relation_name} and alias {relation_alias}" + ) -def assert_node_port_available(ops_test: OpsTest, node_port_name: str) -> None: - assert get_port_from_node_port( - ops_test, node_port_name - ), "No port information for expected service" + return relation_data[0]["application-data"].get(key) -def get_public_k8s_ip() -> str: - result = subprocess.run( - "kubectl get nodes", shell=True, capture_output=True, text=True +async def get_mongos_user_password(ops_test: OpsTest, app_name=MONGOS_APP_NAME) -> Tuple[str, str]: + secret_uri = await get_application_relation_data( + ops_test, app_name, relation_name="cluster", key="secret-user" ) - if result.returncode: - logger.info("failed to retrieve public facing k8s IP") - assert False, "failed to retrieve public facing k8s IP" + secret_data = await get_secret_data(ops_test, secret_uri) + return secret_data.get("username"), secret_data.get("password") + + +async def check_mongos( + ops_test: OpsTest, + unit_id: int, + auth: bool = True, + app_name=MONGOS_APP_NAME, + uri: str = None, +) -> bool: + """Returns True if mongos is running on the provided unit.""" + mongos_client 
= await get_direct_mongos_client(ops_test, unit_id, auth, app_name, uri) + + try: + # wait 10 seconds in case the daemon was just started + for attempt in Retrying(stop=stop_after_delay(10)): + with attempt: + mongos_client.admin.command("ping") + except RetryError: + return False + + return True + + +async def get_mongos_uri( + ops_test: OpsTest, unit_id: int, auth: bool = True, app_name=MONGOS_APP_NAME +): + mongos_host = await get_address_of_unit(ops_test, unit_id) + + if not auth: + return f"mongodb://{mongos_host}:{MONGOS_PORT}" + else: + username, password = await get_mongos_user_password(ops_test, app_name) + return f"mongodb://{username}:{password}@{mongos_host}:{MONGOS_PORT}" + - # port information is the first item of the last line - port_mapping = result.stdout.splitlines()[-1].split()[0] +async def get_direct_mongos_client( + ops_test: OpsTest, + unit_id: int, + auth: bool = True, + app_name: str = MONGOS_APP_NAME, + uri: str = None, +) -> MongoClient: + """Returns a direct mongodb client potentially passing over some of the units.""" + mongos_uri = uri or await get_mongos_uri(ops_test, unit_id, auth, app_name) - # port mapping is of the form ip-172-31-18-133 - return port_mapping.split("ip-")[1].replace("-", ".") + return MongoClient(mongos_uri, directConnection=True) diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 2bf0ff2d..1b4300a4 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -2,43 +2,35 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
-from pathlib import Path import pytest -import yaml from pytest_operator.plugin import OpsTest from .helpers import ( - assert_node_port_available, + check_mongos, + get_direct_mongos_client, + get_address_of_unit, wait_for_mongos_units_blocked, + SHARD_APP_NAME, + CONFIG_SERVER_APP_NAME, + CLUSTER_REL_NAME, MONGOS_APP_NAME, + MONGOS_PORT, + SHARD_REL_NAME, + CONFIG_SERVER_REL_NAME, + deploy_cluster_components, ) -METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) +TEST_USER_NAME = "TestUserName1" +TEST_USER_PWD = "Test123" +TEST_DB_NAME = "my-test-db" @pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_build_and_deploy(ops_test: OpsTest): - """TODO: Build the charm-under-test and deploy it together with related charms. - - Assert on the unit status before any relations/configurations take place. - """ - charm = await ops_test.build_charm(".") - resources = { - "mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"] - } - await ops_test.model.deploy( - charm, - resources=resources, - application_name=MONGOS_APP_NAME, - series="jammy", - num_units=2, - ) - - await ops_test.model.wait_for_idle( - apps=[MONGOS_APP_NAME], timeout=1000, idle_period=30 - ) + """Build and deploy a sharded cluster.""" + await deploy_cluster_components(ops_test) @pytest.mark.group(1) @@ -55,21 +47,86 @@ async def test_waits_for_config_server(ops_test: OpsTest) -> None: ) +async def test_mongos_starts_with_config_server(ops_test: OpsTest) -> None: + # prepare sharded cluster + await ops_test.model.wait_for_idle( + apps=[CONFIG_SERVER_APP_NAME, SHARD_APP_NAME], + idle_period=10, + raise_on_blocked=False, + raise_on_error=False, # Removed this once DPE-4996 is resolved. 
+ ) + await ops_test.model.integrate( + f"{SHARD_APP_NAME}:{SHARD_REL_NAME}", + f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}", + ) + await ops_test.model.wait_for_idle( + apps=[CONFIG_SERVER_APP_NAME, SHARD_APP_NAME], + idle_period=20, + raise_on_blocked=False, + raise_on_error=False, # https://github.com/canonical/mongodb-k8s-operator/issues/301 + ) + + # connect sharded cluster to mongos + await ops_test.model.integrate( + f"{MONGOS_APP_NAME}:{CLUSTER_REL_NAME}", + f"{CONFIG_SERVER_APP_NAME}:{CLUSTER_REL_NAME}", + ) + await ops_test.model.wait_for_idle( + apps=[CONFIG_SERVER_APP_NAME, SHARD_APP_NAME, MONGOS_APP_NAME], + idle_period=20, + status="active", + raise_on_error=False, # Removed this once DPE-4996 is resolved. + ) + + mongos_running = await check_mongos(ops_test, unit_id=0, auth=False) + assert mongos_running, "Mongos is not currently running." + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_mongos_has_user(ops_test: OpsTest) -> None: + mongos_running = await check_mongos(ops_test, unit_id=0, auth=True) + assert mongos_running, "Mongos is not currently running." + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_user_with_extra_roles(ops_test: OpsTest) -> None: + mongos_client = await get_direct_mongos_client( + ops_test, unit_id=0, auth=True, app_name=MONGOS_APP_NAME + ) + mongos_client.admin.command( + "createUser", + TEST_USER_NAME, + pwd=TEST_USER_PWD, + roles=[{"role": "readWrite", "db": TEST_DB_NAME}], + mechanisms=["SCRAM-SHA-256"], + ) + mongos_client.close() + mongos_host = await get_address_of_unit(ops_test, unit_id=0) + test_user_uri = f"mongodb://{TEST_USER_NAME}:{TEST_USER_PWD}@{mongos_host}:{MONGOS_PORT}" + mongos_running = await check_mongos( + ops_test, + unit_id=0, + app_name=MONGOS_APP_NAME, + auth=True, + uri=test_user_uri, + ) + assert mongos_running, "User created is not accessible." 
+ + @pytest.mark.group(1) @pytest.mark.abort_on_fail -async def test_mongos_external_connections(ops_test: OpsTest) -> None: - """Tests that mongos is accessible externally.""" - for unit_id in range(len(ops_test.model.applications[MONGOS_APP_NAME].units)): - assert_node_port_available( - ops_test, node_port_name=f"{MONGOS_APP_NAME}-{unit_id}-external" - ) - - # TODO add this in once DPE-5040 / PR #20 merges - # exposed_node_port = get_port_from_node_port(ops_test, node_port_name="mongos-k8s-nodeport") - # public_k8s_ip = get_public_k8s_ip() - # username, password = await get_mongos_user_password(ops_test, MONGOS_APP_NAME) - # external_mongos_client = MongoClient( - # f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}" - # ) - # external_mongos_client.admin.command("usersInfo") - # external_mongos_client.close() +async def test_mongos_can_scale(ops_test: OpsTest) -> None: + """Tests that mongos powers down when no config server is accessible.""" + await ops_test.model.applications[MONGOS_APP_NAME].scale(2) + + await ops_test.model.wait_for_idle( + apps=[MONGOS_APP_NAME], + status="active", + timeout=1000, + ) + + for unit_id in range(0, len(ops_test.model.applications[MONGOS_APP_NAME].units)): + mongos_running = await check_mongos(ops_test, unit_id=unit_id, auth=True) + assert mongos_running, "Mongos is not currently running." 
From 9055749e59f3ccbe797c7d24757ba672a9cb9c4b Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Mon, 26 Aug 2024 09:26:40 +0000 Subject: [PATCH 10/19] WIP migrate to config-option - breaks node port --- .../mongodb/v0/config_server_interface.py | 6 +- src/charm.py | 44 ++++--- tests/integration/client_relations/helpers.py | 26 +++++ .../test_external_client_relations.py | 107 +++++++++++------- 4 files changed, 127 insertions(+), 56 deletions(-) diff --git a/lib/charms/mongodb/v0/config_server_interface.py b/lib/charms/mongodb/v0/config_server_interface.py index ba515f3e..cdb733d9 100644 --- a/lib/charms/mongodb/v0/config_server_interface.py +++ b/lib/charms/mongodb/v0/config_server_interface.py @@ -42,7 +42,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 9 +LIBPATCH = 11 class ClusterProvider(Object): @@ -328,6 +328,8 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None: # K8s charm have a 1:Many client scheme and share connection info in a different manner. 
if self.substrate == Config.Substrate.VM: self.charm.remove_connection_info() + else: + self.db_initialised = False # BEGIN: helper functions def pass_hook_checks(self, event): @@ -371,7 +373,7 @@ def is_mongos_running(self) -> bool: connection_uri = f"mongodb://{self.charm.get_mongos_host()}" # use the mongos port for k8s charms and external connections on VM - if self.charm.is_external_client or self.substrate == Config.K8S_SUBSTRATE: + if self.substrate == Config.Substrate.K8S or self.charm.is_external_client: connection_uri = connection_uri + f":{Config.MONGOS_PORT}" with MongosConnection(None, connection_uri) as mongo: diff --git a/src/charm.py b/src/charm.py index bb5d06ca..df292b5c 100755 --- a/src/charm.py +++ b/src/charm.py @@ -60,35 +60,41 @@ class MongosCharm(ops.CharmBase): def __init__(self, *args): super().__init__(*args) + self.role = Config.Role.MONGOS + self.secrets = SecretCache(self) + self.status = MongosStatusHandler(self) + self.node_port_manager = NodePortManager(self) + + # lifecycle events + self.framework.observe(self.on.config_changed, self._on_config_changed) self.framework.observe( self.on.mongos_pebble_ready, self._on_mongos_pebble_ready ) self.framework.observe(self.on.start, self._on_start) self.framework.observe(self.on.update_status, self._on_update_status) - self.tls = MongoDBTLS(self, Config.Relations.PEERS, substrate=Config.SUBSTRATE) - self.role = Config.Role.MONGOS - self.secrets = SecretCache(self) - self.status = MongosStatusHandler(self) + # relations + self.tls = MongoDBTLS(self, Config.Relations.PEERS, substrate=Config.SUBSTRATE) self.cluster = ClusterRequirer(self, substrate=Config.SUBSTRATE) - self.node_port_manager = NodePortManager(self) - # BEGIN: hook functions def _on_config_changed(self, event: ConfigChangedEvent) -> None: """Listen to changes in the application configuration.""" - if self.expose_external not in Config.ExternalConnections.VALID_EXTERNAL_CONFIG: + external_config = 
self.model.config["expose-external"] + self.expose_external = external_config + if external_config not in Config.ExternalConnections.VALID_EXTERNAL_CONFIG: logger.error( "External configuration: %s for expose-external is not valid, should be one of: %s", - self.expose_external, + external_config, Config.ExternalConnections.VALID_EXTERNAL_CONFIG, ) - self.unit.status = Config.Status.INVALID_EXTERNAL_CONFIG + self.status.set_and_share_status(Config.Status.INVALID_EXTERNAL_CONFIG) + return - if self.expose_external == Config.ExternalConnections.EXTERNAL_NODEPORT: + if external_config == Config.ExternalConnections.EXTERNAL_NODEPORT: self.update_external_services() - if self.expose_external == Config.ExternalConnections.NONE: + if external_config == Config.ExternalConnections.NONE: # TODO future PR - support revoking external access pass @@ -460,11 +466,21 @@ def unit_host(self, unit: Unit) -> str: @property def expose_external(self) -> Optional[str]: """Returns mode of exposure for external connections.""" + if self.app_peer_data["expose-external"] == "none": + return + + return self.app_peer_data["expose-external"] + + @expose_external.setter + def expose_external(self, expose_external): + """Set the db_initialised flag.""" + if not self.unit.is_leader: + return - if self.app_peer_data.get("expose-external") == "none": + if expose_external not in Config.ExternalConnections.VALID_EXTERNAL_CONFIG: return - return self.app_peer_data.get("expose-external") + self.app_peer_data["expose-external"] = expose_external @property def peers_units(self) -> list[Unit]: @@ -527,7 +543,7 @@ def is_external_client(self) -> bool: Note that for K8s routers this should always default to True. However we still include this function so that we can have parity on properties with the K8s and VM routers. 
""" - return self.expose_external == Config.Ex + return self.expose_external == Config.ExternalConnections.EXTERNAL_NODEPORT @property def database(self) -> Optional[str]: diff --git a/tests/integration/client_relations/helpers.py b/tests/integration/client_relations/helpers.py index 3a96f49d..a9c10290 100644 --- a/tests/integration/client_relations/helpers.py +++ b/tests/integration/client_relations/helpers.py @@ -5,6 +5,11 @@ import subprocess import logging +from ..helpers import ( + MONGOS_APP_NAME, + get_mongos_user_password, + MongoClient, +) from pytest_operator.plugin import OpsTest @@ -24,6 +29,7 @@ def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> None: len(result.stdout.splitlines()) > 0 ), "No port information available for expected service" + print(result.stdout) # port information is available at PORT_MAPPING_INDEX port_mapping = result.stdout.split()[PORT_MAPPING_INDEX] @@ -37,6 +43,26 @@ def assert_node_port_available(ops_test: OpsTest, node_port_name: str) -> None: ), "No port information for expected service" +async def assert_all_unit_node_ports_available(ops_test: OpsTest): + """Assert all ports available in mongos deployment.""" + for unit_id in range(len(ops_test.model.applications[MONGOS_APP_NAME].units)): + assert_node_port_available( + ops_test, node_port_name=f"{MONGOS_APP_NAME}-{unit_id}-external" + ) + + exposed_node_port = get_port_from_node_port( + ops_test, node_port_name=f"{MONGOS_APP_NAME}-{unit_id}-external" + ) + public_k8s_ip = get_public_k8s_ip() + username, password = await get_mongos_user_password(ops_test, MONGOS_APP_NAME) + external_mongos_client = MongoClient( + f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}" + ) + print(f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}") + external_mongos_client.admin.command("usersInfo") + external_mongos_client.close() + + def get_public_k8s_ip() -> str: result = subprocess.run( "kubectl get nodes", shell=True, 
capture_output=True, text=True diff --git a/tests/integration/client_relations/test_external_client_relations.py b/tests/integration/client_relations/test_external_client_relations.py index 86456903..230f94fe 100644 --- a/tests/integration/client_relations/test_external_client_relations.py +++ b/tests/integration/client_relations/test_external_client_relations.py @@ -6,55 +6,82 @@ import pytest from pytest_operator.plugin import OpsTest -from ..helpers import ( - MONGOS_APP_NAME, - build_cluster, - deploy_cluster_components, - get_mongos_user_password, - MongoClient, -) - -from .helpers import ( - assert_node_port_available, - get_port_from_node_port, - get_public_k8s_ip, -) + +from .helpers import assert_all_unit_node_ports_available + TEST_USER_NAME = "TestUserName1" TEST_USER_PWD = "Test123" TEST_DB_NAME = "my-test-db" -@pytest.mark.group(1) -@pytest.mark.abort_on_fail -async def test_build_and_deploy(ops_test: OpsTest): - """Build and deploy a sharded cluster.""" - await deploy_cluster_components(ops_test) - await build_cluster(ops_test) +# @pytest.mark.group(1) +# @pytest.mark.abort_on_fail +# async def test_build_and_deploy(ops_test: OpsTest): +# """Build and deploy a sharded cluster.""" +# await deploy_cluster_components(ops_test) +# await build_cluster(ops_test) @pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_mongos_external_connections(ops_test: OpsTest) -> None: """Tests that mongos is accessible externally.""" - configuration_parameters = {"expose-external": "nodeport"} - - # apply new configuration options - await ops_test.model.applications[MONGOS_APP_NAME].set_config( - configuration_parameters - ) - for unit_id in range(len(ops_test.model.applications[MONGOS_APP_NAME].units)): - assert_node_port_available( - ops_test, node_port_name=f"{MONGOS_APP_NAME}-{unit_id}-external" - ) - - exposed_node_port = get_port_from_node_port( - ops_test, node_port_name="mongos-k8s-nodeport" - ) - public_k8s_ip = get_public_k8s_ip() - username, password 
= await get_mongos_user_password(ops_test, MONGOS_APP_NAME) - external_mongos_client = MongoClient( - f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}" - ) - external_mongos_client.admin.command("usersInfo") - external_mongos_client.close() + # configuration_parameters = {"expose-external": "nodeport"} + + # # apply new configuration options + # await ops_test.model.applications[MONGOS_APP_NAME].set_config(configuration_parameters) + # await ops_test.model.wait_for_idle(apps=[MONGOS_APP_NAME], idle_period=15) + + # # verify each unit has a node port available + await assert_all_unit_node_ports_available(ops_test) + + +# @pytest.mark.group(1) +# @pytest.mark.abort_on_fail +# async def test_mongos_external_connections_scale(ops_test: OpsTest) -> None: +# """Tests that new mongos units are accessible externally.""" +# await ops_test.model.applications[MONGOS_APP_NAME].scale(2) +# await ops_test.model.wait_for_idle(apps=[MONGOS_APP_NAME], idle_period=15) + +# # verify each unit has a node port available +# await assert_all_unit_node_ports_available(ops_test) + + +# @pytest.mark.group(1) +# @pytest.mark.abort_on_fail +# async def test_mongos_bad_configuration(ops_test: OpsTest) -> None: +# """Tests that mongos is accessible externally.""" +# configuration_parameters = {"expose-external": "nonsensical-setting"} + +# # apply new configuration options +# await ops_test.model.applications[MONGOS_APP_NAME].set_config(configuration_parameters) + +# # verify that Charmed Mongos is blocked and reports incorrect credentials +# await wait_for_mongos_units_blocked( +# ops_test, +# MONGOS_APP_NAME, +# status="Missing relation to config-server.", +# timeout=300, +# ) + +# # verify new-configuration didn't break old configuration +# await assert_all_unit_node_ports_available(ops_test) + + +# @pytest.mark.group(1) +# @pytest.mark.abort_on_fail +# async def test_turn_off_nodeport(ops_test: OpsTest) -> None: +# """TODO Future PR, test that when the user toggles 
nodeport to none, it is no longer exposed.""" + + +# @pytest.mark.group(1) +# @pytest.mark.abort_on_fail +# async def test_external_clients_use_nodeport(ops_test: OpsTest) -> None: +# """TODO Future PR, test that external clients use nodeport.""" + + +# @pytest.mark.group(1) +# @pytest.mark.abort_on_fail +# async def test_internal_clients_use_K8s(ops_test: OpsTest) -> None: +# """TODO Future PR, test that external clients use K8s even when nodeport is available.""" From efb7070fcc3d0a30b8902ebc0ce9c31b8b5790d6 Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Tue, 27 Aug 2024 08:46:58 +0000 Subject: [PATCH 11/19] node port functional again --- src/charm.py | 7 +++---- src/node_port.py | 6 +++++- .../client_relations/test_external_client_relations.py | 10 +++++++--- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/charm.py b/src/charm.py index a5cc5758..ea47c153 100755 --- a/src/charm.py +++ b/src/charm.py @@ -93,6 +93,7 @@ def _on_config_changed(self, event: ConfigChangedEvent) -> None: if external_config == Config.ExternalConnections.EXTERNAL_NODEPORT: self.update_external_services() + self.restart_charm_services() if external_config == Config.ExternalConnections.NONE: # TODO future PR - support revoking external access @@ -103,7 +104,6 @@ def _on_config_changed(self, event: ConfigChangedEvent) -> None: def _on_mongos_pebble_ready(self, event) -> None: """Configure MongoDB pebble layer specification.""" # any external services must be created before setting of properties - if not self.is_integrated_to_config_server(): logger.info( "mongos service not starting. Cannot start until application is integrated to a config-server." 
@@ -172,8 +172,7 @@ def _on_update_status(self, _): # BEGIN: helper functions def update_external_services(self) -> None: """Attempts to update any external Kubernetes services.""" - # every unit attempts to create a nodeport service - # if exists, will silently continue + # every unit attempts to create a nodeport service - if exists, will silently continue self.node_port_manager.apply_service( service=self.node_port_manager.build_node_port_services( port=Config.MONGOS_PORT @@ -474,7 +473,7 @@ def expose_external(self) -> Optional[str]: @expose_external.setter def expose_external(self, expose_external): """Set the db_initialised flag.""" - if not self.unit.is_leader: + if not self.unit.is_leader(): return if expose_external not in Config.ExternalConnections.VALID_EXTERNAL_CONFIG: diff --git a/src/node_port.py b/src/node_port.py index 6798f0d4..52eb0f20 100644 --- a/src/node_port.py +++ b/src/node_port.py @@ -77,8 +77,12 @@ def build_node_port_services(self, port: str) -> Service: ], ), spec=ServiceSpec( + externalTrafficPolicy="Local", type="NodePort", - selector={"app.kubernetes.io/name": self.pod_name}, + selector={ + "app.kubernetes.io/name": self.app_name, + "statefulset.kubernetes.io/pod-name": self.pod_name, + }, ports=[ ServicePort( protocol="TCP", diff --git a/tests/integration/client_relations/test_external_client_relations.py b/tests/integration/client_relations/test_external_client_relations.py index 6a689518..8b5a69ba 100644 --- a/tests/integration/client_relations/test_external_client_relations.py +++ b/tests/integration/client_relations/test_external_client_relations.py @@ -36,7 +36,9 @@ async def test_mongos_external_connections(ops_test: OpsTest) -> None: configuration_parameters = {"expose-external": "nodeport"} # apply new configuration options - await ops_test.model.applications[MONGOS_APP_NAME].set_config(configuration_parameters) + await ops_test.model.applications[MONGOS_APP_NAME].set_config( + configuration_parameters + ) await 
ops_test.model.wait_for_idle(apps=[MONGOS_APP_NAME], idle_period=15) # # verify each unit has a node port available @@ -61,13 +63,15 @@ async def test_mongos_bad_configuration(ops_test: OpsTest) -> None: configuration_parameters = {"expose-external": "nonsensical-setting"} # apply new configuration options - await ops_test.model.applications[MONGOS_APP_NAME].set_config(configuration_parameters) + await ops_test.model.applications[MONGOS_APP_NAME].set_config( + configuration_parameters + ) # verify that Charmed Mongos is blocked and reports incorrect credentials await wait_for_mongos_units_blocked( ops_test, MONGOS_APP_NAME, - status="Missing relation to config-server.", + status="Config option for expose-external not valid.", timeout=300, ) From 7c3bdd6899ee3358d68a0d5458f262f48d93f07c Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Tue, 27 Aug 2024 10:01:54 +0000 Subject: [PATCH 12/19] remove unnecessary changes to get node port to work --- src/charm.py | 17 ++++++++++++----- src/node_port.py | 1 - tests/integration/client_relations/helpers.py | 2 -- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/charm.py b/src/charm.py index ea47c153..d040dffe 100755 --- a/src/charm.py +++ b/src/charm.py @@ -80,8 +80,8 @@ def __init__(self, *args): # BEGIN: hook functions def _on_config_changed(self, event: ConfigChangedEvent) -> None: """Listen to changes in the application configuration.""" + previous_config = self.expose_external external_config = self.model.config["expose-external"] - self.expose_external = external_config if external_config not in Config.ExternalConnections.VALID_EXTERNAL_CONFIG: logger.error( "External configuration: %s for expose-external is not valid, should be one of: %s", @@ -91,15 +91,19 @@ def _on_config_changed(self, event: ConfigChangedEvent) -> None: self.status.set_and_share_status(Config.Status.INVALID_EXTERNAL_CONFIG) return + self.expose_external = external_config if external_config == 
Config.ExternalConnections.EXTERNAL_NODEPORT: self.update_external_services() - self.restart_charm_services() - if external_config == Config.ExternalConnections.NONE: - # TODO future PR - support revoking external access + if ( + external_config == Config.ExternalConnections.NONE + and previous_config == Config.ExternalConnections.EXTERNAL_NODEPORT + ): + # TODO DPE-5268 - support revoking external access pass # TODO DPE-5235 support updating data-integrator clients to have/not have public IP + # depending on the result of the configuration def _on_mongos_pebble_ready(self, event) -> None: """Configure MongoDB pebble layer specification.""" @@ -465,7 +469,10 @@ def unit_host(self, unit: Unit) -> str: @property def expose_external(self) -> Optional[str]: """Returns mode of exposure for external connections.""" - if self.app_peer_data["expose-external"] == "none": + if ( + self.app_peer_data.get("expose-external", Config.ExternalConnections.NONE) + == Config.ExternalConnections.NONE + ): return return self.app_peer_data["expose-external"] diff --git a/src/node_port.py b/src/node_port.py index 52eb0f20..5ba26d33 100644 --- a/src/node_port.py +++ b/src/node_port.py @@ -80,7 +80,6 @@ def build_node_port_services(self, port: str) -> Service: externalTrafficPolicy="Local", type="NodePort", selector={ - "app.kubernetes.io/name": self.app_name, "statefulset.kubernetes.io/pod-name": self.pod_name, }, ports=[ diff --git a/tests/integration/client_relations/helpers.py b/tests/integration/client_relations/helpers.py index a9c10290..f7df7259 100644 --- a/tests/integration/client_relations/helpers.py +++ b/tests/integration/client_relations/helpers.py @@ -29,7 +29,6 @@ def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> None: len(result.stdout.splitlines()) > 0 ), "No port information available for expected service" - print(result.stdout) # port information is available at PORT_MAPPING_INDEX port_mapping = result.stdout.split()[PORT_MAPPING_INDEX] @@ -58,7 
+57,6 @@ async def assert_all_unit_node_ports_available(ops_test: OpsTest): external_mongos_client = MongoClient( f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}" ) - print(f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}") external_mongos_client.admin.command("usersInfo") external_mongos_client.close() From c2f767699f02215e83a37a3dae7114af3562a082 Mon Sep 17 00:00:00 2001 From: MiaAltieri Date: Tue, 27 Aug 2024 10:09:23 +0000 Subject: [PATCH 13/19] personal nits --- src/charm.py | 22 +++++++++---------- .../integration/client_relations/__init__.py | 2 +- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/src/charm.py b/src/charm.py index d040dffe..2e29625f 100755 --- a/src/charm.py +++ b/src/charm.py @@ -93,7 +93,12 @@ def _on_config_changed(self, event: ConfigChangedEvent) -> None: self.expose_external = external_config if external_config == Config.ExternalConnections.EXTERNAL_NODEPORT: - self.update_external_services() + # every unit attempts to create a nodeport service - if exists, will silently continue + self.node_port_manager.apply_service( + service=self.node_port_manager.build_node_port_services( + port=Config.MONGOS_PORT + ) + ) if ( external_config == Config.ExternalConnections.NONE @@ -107,7 +112,6 @@ def _on_config_changed(self, event: ConfigChangedEvent) -> None: def _on_mongos_pebble_ready(self, event) -> None: """Configure MongoDB pebble layer specification.""" - # any external services must be created before setting of properties if not self.is_integrated_to_config_server(): logger.info( "mongos service not starting. Cannot start until application is integrated to a config-server." 
@@ -149,7 +153,10 @@ def _on_start(self, event: StartEvent) -> None: def _on_update_status(self, _): """Handle the update status event""" - if self.expose_external not in Config.ExternalConnections.VALID_EXTERNAL_CONFIG: + if ( + self.model.config["expose-external"] + not in Config.ExternalConnections.VALID_EXTERNAL_CONFIG + ): logger.error( "External configuration: %s for expose-external is not valid, should be one of: %s", self.expose_external, @@ -174,15 +181,6 @@ def _on_update_status(self, _): # END: hook functions # BEGIN: helper functions - def update_external_services(self) -> None: - """Attempts to update any external Kubernetes services.""" - # every unit attempts to create a nodeport service - if exists, will silently continue - self.node_port_manager.apply_service( - service=self.node_port_manager.build_node_port_services( - port=Config.MONGOS_PORT - ) - ) - def get_keyfile_contents(self) -> str | None: """Retrieves the contents of the keyfile on host machine.""" # wait for keyFile to be created by leader unit diff --git a/tests/integration/client_relations/__init__.py b/tests/integration/client_relations/__init__.py index db3bfe1a..e3979c0f 100644 --- a/tests/integration/client_relations/__init__.py +++ b/tests/integration/client_relations/__init__.py @@ -1,2 +1,2 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
From b01dc0f0da40b498e798a20c2b6a8c328259c49b Mon Sep 17 00:00:00 2001 From: Mia Altieri <32723809+MiaAltieri@users.noreply.github.com> Date: Wed, 28 Aug 2024 09:03:30 +0200 Subject: [PATCH 14/19] Update src/charm.py Co-authored-by: Mehdi Bendriss --- src/charm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/charm.py b/src/charm.py index 2e29625f..4841b645 100755 --- a/src/charm.py +++ b/src/charm.py @@ -471,7 +471,7 @@ def expose_external(self) -> Optional[str]: self.app_peer_data.get("expose-external", Config.ExternalConnections.NONE) == Config.ExternalConnections.NONE ): - return + return None return self.app_peer_data["expose-external"] From 6e4dc9d3e03fcd23537647ab27a3096e4cada7d7 Mon Sep 17 00:00:00 2001 From: Mia Altieri <32723809+MiaAltieri@users.noreply.github.com> Date: Wed, 28 Aug 2024 09:03:37 +0200 Subject: [PATCH 15/19] Update src/charm.py Co-authored-by: Mehdi Bendriss --- src/charm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/charm.py b/src/charm.py index 4841b645..c0b74930 100755 --- a/src/charm.py +++ b/src/charm.py @@ -476,7 +476,7 @@ def expose_external(self) -> Optional[str]: return self.app_peer_data["expose-external"] @expose_external.setter - def expose_external(self, expose_external): + def expose_external(self, expose_external: str) -> None: """Set the db_initialised flag.""" if not self.unit.is_leader(): return From bed9859a8ba599d16985a114e316d8aa2637f948 Mon Sep 17 00:00:00 2001 From: Mia Altieri <32723809+MiaAltieri@users.noreply.github.com> Date: Wed, 28 Aug 2024 09:03:46 +0200 Subject: [PATCH 16/19] Update tests/integration/client_relations/helpers.py Co-authored-by: Mehdi Bendriss --- tests/integration/client_relations/helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/client_relations/helpers.py b/tests/integration/client_relations/helpers.py index f7df7259..60b3742b 100644 --- a/tests/integration/client_relations/helpers.py +++ 
b/tests/integration/client_relations/helpers.py @@ -18,7 +18,7 @@ PORT_MAPPING_INDEX = 4 -def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> None: +def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> str: node_port_cmd = f"kubectl get svc -n {ops_test.model.name} | grep NodePort | grep {node_port_name}" result = subprocess.run(node_port_cmd, shell=True, capture_output=True, text=True) if result.returncode: From cd1fa10ab38f2fcbc51e3b7ceb62d464a1fe9141 Mon Sep 17 00:00:00 2001 From: Mia Altieri Date: Thu, 29 Aug 2024 07:34:21 +0000 Subject: [PATCH 17/19] more infomrative log messages in tests --- tests/integration/client_relations/helpers.py | 70 +++++++++++++++---- 1 file changed, 55 insertions(+), 15 deletions(-) diff --git a/tests/integration/client_relations/helpers.py b/tests/integration/client_relations/helpers.py index c4c07f55..059afa33 100644 --- a/tests/integration/client_relations/helpers.py +++ b/tests/integration/client_relations/helpers.py @@ -21,6 +21,7 @@ ) from pytest_operator.plugin import OpsTest +from pymongo.errors import ServerSelectionTimeoutError PORT_MAPPING_INDEX = 4 @@ -81,12 +82,18 @@ def is_relation_joined(ops_test: OpsTest, endpoint_one: str, endpoint_two: str) return False -def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> str: +def get_node_port_info(ops_test: OpsTest, node_port_name: str) -> str: node_port_cmd = f"kubectl get svc -n {ops_test.model.name} | grep NodePort | grep {node_port_name}" - result = subprocess.run(node_port_cmd, shell=True, capture_output=True, text=True) - if result.returncode: - logger.info("was not able to find nodeport") - assert False, f"Command: {node_port_cmd} to find node port failed." 
+ return subprocess.run(node_port_cmd, shell=True, capture_output=True, text=True) + + +def has_node_port(ops_test: OpsTest, node_port_name: str) -> None: + result = get_node_port_info(ops_test, node_port_name) + return len(result.stdout.splitlines()) > 0 + + +def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> str: + result = get_node_port_info(ops_test, node_port_name) assert ( len(result.stdout.splitlines()) > 0 @@ -99,42 +106,75 @@ def get_port_from_node_port(ops_test: OpsTest, node_port_name: str) -> str: return port_mapping.split(":")[1].split("/")[0] -def assert_node_port_available(ops_test: OpsTest, node_port_name: str) -> None: - assert get_port_from_node_port( - ops_test, node_port_name - ), "No port information for expected service" +def assert_node_port_availablity( + ops_test: OpsTest, node_port_name: str, available: bool = True +) -> None: + incorrect_availablity = "not available" if available else "is available" + assert ( + has_node_port(ops_test, node_port_name) == available + ), f"Port information {incorrect_availablity} for service" async def assert_all_unit_node_ports_available(ops_test: OpsTest): """Assert all ports available in mongos deployment.""" for unit_id in range(len(ops_test.model.applications[MONGOS_APP_NAME].units)): - assert_node_port_available( + assert_node_port_availablity( ops_test, node_port_name=f"{MONGOS_APP_NAME}-{unit_id}-external" ) exposed_node_port = get_port_from_node_port( ops_test, node_port_name=f"{MONGOS_APP_NAME}-{unit_id}-external" ) - public_k8s_ip = get_public_k8s_ip() - username, password = await get_mongos_user_password(ops_test, MONGOS_APP_NAME) + + assert await is_external_mongos_client_reachble( + ops_test, exposed_node_port + ), "client is not reachable" + + +async def is_external_mongos_client_reachble( + ops_test: OpsTest, exposed_node_port: str +) -> bool: + """Returns True if the mongos client is reachable on the provided node port via the k8s ip.""" + public_k8s_ip = 
get_public_k8s_ip() + username, password = await get_mongos_user_password(ops_test, MONGOS_APP_NAME) + try: external_mongos_client = MongoClient( f"mongodb://{username}:{password}@{public_k8s_ip}:{exposed_node_port}" ) external_mongos_client.admin.command("usersInfo") + except ServerSelectionTimeoutError: + return False + finally: external_mongos_client.close() + return True + + +async def assert_all_unit_node_ports_are_unavailable(ops_test: OpsTest): + """Assert all ports available in mongos deployment.""" + for unit_id in range(len(ops_test.model.applications[MONGOS_APP_NAME].units)): + assert_node_port_availablity( + ops_test, + node_port_name=f"{MONGOS_APP_NAME}-{unit_id}-external", + available=False, + ) + -@retry(stop=stop_after_attempt(10), wait=wait_fixed(1), reraise=True) def get_public_k8s_ip() -> str: result = subprocess.run( - "sudo kubectl get nodes", shell=True, capture_output=True, text=True + "kubectl get nodes", shell=True, capture_output=True, text=True ) if result.returncode: - logger.info("failed to retrieve public facing k8s IP") + logger.info("failed to retrieve public facing k8s IP error: %s", result.stderr) + assert False, "failed to retrieve public facing k8s IP" + + if len(result.stdout.splitlines()) < 2: + logger.info("No entries for public facing k8s IP, : %s", result.stdout) assert False, "failed to retrieve public facing k8s IP" # port information is the first item of the last line + logger.info("Retrieved port information: %s", result.stdout) port_mapping = result.stdout.splitlines()[-1].split()[0] # port mapping is of the form ip-172-31-18-133 From b275352506205b2dfce5dae6ecfc37dd270468a0 Mon Sep 17 00:00:00 2001 From: Mia Altieri Date: Thu, 29 Aug 2024 13:58:04 +0000 Subject: [PATCH 18/19] update the way we parse for k8s ip --- tests/integration/client_relations/helpers.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/tests/integration/client_relations/helpers.py 
b/tests/integration/client_relations/helpers.py index 059afa33..e2e0180a 100644 --- a/tests/integration/client_relations/helpers.py +++ b/tests/integration/client_relations/helpers.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. +import json from typing import Tuple import logging from pathlib import Path @@ -162,20 +163,16 @@ async def assert_all_unit_node_ports_are_unavailable(ops_test: OpsTest): def get_public_k8s_ip() -> str: result = subprocess.run( - "kubectl get nodes", shell=True, capture_output=True, text=True + "kubectl get nodes -o json", shell=True, capture_output=True, text=True ) if result.returncode: logger.info("failed to retrieve public facing k8s IP error: %s", result.stderr) assert False, "failed to retrieve public facing k8s IP" - if len(result.stdout.splitlines()) < 2: - logger.info("No entries for public facing k8s IP, : %s", result.stdout) - assert False, "failed to retrieve public facing k8s IP" - - # port information is the first item of the last line - logger.info("Retrieved port information: %s", result.stdout) - port_mapping = result.stdout.splitlines()[-1].split()[0] + node_info = json.loads(result.stdout) - # port mapping is of the form ip-172-31-18-133 - return port_mapping.split("ip-")[1].replace("-", ".") + try: + return node_info["items"][0]["status"]["addresses"][0]["address"] + except KeyError: + assert False, "failed to retrieve public facing k8s IP" From 79e0707473b04daea10a4f47830e4e30b8286e06 Mon Sep 17 00:00:00 2001 From: Mia Altieri Date: Thu, 29 Aug 2024 15:23:01 +0000 Subject: [PATCH 19/19] use exisiting function --- tests/integration/client_relations/helpers.py | 15 +-------------- .../test_internal_client_relations.py | 17 +++++++++++++++-- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/integration/client_relations/helpers.py b/tests/integration/client_relations/helpers.py index e2e0180a..69b5aa55 100644 --- 
a/tests/integration/client_relations/helpers.py +++ b/tests/integration/client_relations/helpers.py @@ -19,6 +19,7 @@ MongoClient, get_application_relation_data, get_secret_data, + get_mongos_user_password, ) from pytest_operator.plugin import OpsTest @@ -41,20 +42,6 @@ METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) -@retry(stop=stop_after_attempt(10), wait=wait_fixed(15), reraise=True) -async def get_mongos_user_password( - ops_test: OpsTest, app_name=MONGOS_APP_NAME, relation_name="cluster" -) -> Tuple[str, str]: - secret_uri = await get_application_relation_data( - ops_test, app_name, relation_name=relation_name, key="secret-user" - ) - assert secret_uri, "No secret URI found" - - secret_data = await get_secret_data(ops_test, secret_uri) - - return secret_data.get("username"), secret_data.get("password") - - @retry(stop=stop_after_attempt(10), wait=wait_fixed(15), reraise=True) async def get_client_connection_string( ops_test: OpsTest, app_name=MONGOS_APP_NAME, relation_name="cluster" diff --git a/tests/integration/client_relations/test_internal_client_relations.py b/tests/integration/client_relations/test_internal_client_relations.py index 7c5a08cf..b541cf5f 100644 --- a/tests/integration/client_relations/test_internal_client_relations.py +++ b/tests/integration/client_relations/test_internal_client_relations.py @@ -5,6 +5,7 @@ import pytest from pytest_operator.plugin import OpsTest +from typing import Tuple from ..helpers import ( build_cluster, @@ -14,11 +15,12 @@ check_mongos, get_direct_mongos_client, MONGOS_PORT, + get_secret_data, + get_application_relation_data, ) from .helpers import ( is_relation_joined, get_client_connection_string, - get_mongos_user_password, ) CLIENT_RELATION_NAME = "mongos" @@ -55,7 +57,6 @@ async def test_integrate_with_internal_client(ops_test: OpsTest) -> None: await ops_test.model.wait_for_idle( apps=[APPLICATION_APP_NAME, MONGOS_APP_NAME], status="active", idle_period=20 ) - await ops_test.model.block_until( 
lambda: is_relation_joined( ops_test, @@ -145,3 +146,15 @@ async def test_removed_relation_no_longer_has_access(ops_test: OpsTest): assert ( not mongos_can_connect_with_auth ), "Client can still connect after relation broken." + + +# TODO, use get_mongos_user_password in base helpers once DPE:5215 is fixed retrieve via secret +async def get_mongos_user_password( + ops_test: OpsTest, app_name=MONGOS_APP_NAME, relation_name="cluster" +) -> Tuple[str, str]: + secret_uri = await get_application_relation_data( + ops_test, app_name, relation_name=relation_name, key="secret-user" + ) + + secret_data = await get_secret_data(ops_test, secret_uri) + return secret_data.get("username"), secret_data.get("password")