
Commit 958dfb8

Issue #10: list supporting backends in collection metadata
soxofaan committed Oct 5, 2021
1 parent 8e50569 commit 958dfb8
Showing 2 changed files with 20 additions and 3 deletions.
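
In short: with this change, every collection in the aggregated collection listing carries a `backends` field naming the upstream backends that offer it. A minimal sketch of the two cases, with collection and backend ids taken from the tests below (illustrative values, not real backend output):

# Single-backend collection: metadata is passed through, plus the new field.
{"id": "S2", "backends": ["b1"]}

# Collection offered by multiple backends: metadata is merged, and the backend
# list appears both in the STAC-style summary and in the new top-level field.
{"id": "S4", "summaries": {"provider:backend": ["b1", "b2"]}, "backends": ["b1", "b2"]}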
4 changes: 4 additions & 0 deletions src/openeo_aggregator/backend.py
@@ -89,10 +89,12 @@ def _get_all_metadata(self) -> Tuple[List[dict], _InternalCollectionMetadata]:
         collections_metadata = []
         internal_data = _InternalCollectionMetadata()
         for cid, by_backend in grouped.items():
+            # TODO: don't differentiate between single and multi-backend case
             if len(by_backend) == 1:
                 # Simple case: collection is only available on single backend.
                 _log.debug(f"Accept single backend collection {cid} as is")
                 (bid, metadata), = by_backend.items()
+                metadata["backends"] = [bid]
             else:
                 _log.info(f"Merging {cid!r} collection metadata from backends {by_backend.keys()}")
                 metadata = self._merge_collection_metadata(by_backend)
@@ -157,6 +159,8 @@ def _merge_collection_metadata(self, by_backend: Dict[str, dict]) -> dict:
             # TODO: use a more robust/user friendly backend pointer than backend id (which is internal implementation detail)
             self.STAC_PROPERTY_PROVIDER_BACKEND: list(by_backend.keys())
         }
+        # TODO: #10 proper schema of this backend listing?
+        result["backends"] = list(by_backend.keys())
         # TODO: assets ?

         return result
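
Taken together, the two hunks above annotate both code paths: the single-backend pass-through gets `metadata["backends"] = [bid]`, and the merge path gets `result["backends"] = list(by_backend.keys())`. A minimal, self-contained sketch of just this annotation logic (not the real implementation, which does far more merging; the `"provider:backend"` summary key is taken from the tests below):

from typing import Dict

def annotate_backends(by_backend: Dict[str, dict]) -> dict:
    """Attach a 'backends' list to collection metadata, as this commit does."""
    if len(by_backend) == 1:
        # Single backend: take its metadata as is, record the backend id.
        (bid, metadata), = by_backend.items()
        metadata["backends"] = [bid]
        return metadata
    # Multiple backends: stand-in for the real _merge_collection_metadata.
    metadata = {"id": next(iter(by_backend.values()))["id"]}
    metadata["summaries"] = {"provider:backend": list(by_backend.keys())}
    metadata["backends"] = list(by_backend.keys())
    return metadata

print(annotate_backends({"b1": {"id": "S2"}}))
# {'id': 'S2', 'backends': ['b1']}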
19 changes: 16 additions & 3 deletions tests/test_backend.py
@@ -127,7 +127,10 @@ def test_get_all_metadata_simple(self, multi_backend_connection, backend1, backe
         requests_mock.get(backend2 + "/collections", json={"collections": [{"id": "S3"}]})
         catalog = AggregatorCollectionCatalog(backends=multi_backend_connection)
         metadata = catalog.get_all_metadata()
-        assert metadata == [{"id": "S2"}, {"id": "S3"}]
+        assert metadata == [
+            {"id": "S2", "backends": ["b1"]},
+            {"id": "S3", "backends": ["b2"]},
+        ]

     def test_get_all_metadata_common_collections_minimal(
             self, multi_backend_connection, backend1, backend2, requests_mock
@@ -137,16 +140,23 @@ def test_get_all_metadata_common_collections_minimal(
         catalog = AggregatorCollectionCatalog(backends=multi_backend_connection)
         metadata = catalog.get_all_metadata()
         assert metadata == [
-            {"id": "S3"},
+            {
+                "id": "S3",
+                "backends": ["b1"],
+            },
             {
                 "id": "S4", "description": "S4", "title": "S4",
                 "stac_version": "0.9.0",
                 "extent": {"spatial": {"bbox": [[-180, -90, 180, 90]]}, "temporal": {"interval": [[None, None]]}},
                 "license": "proprietary",
                 "summaries": {"provider:backend": ["b1", "b2"]},
+                "backends": ["b1", "b2"],
                 "links": [],
             },
-            {"id": "S5"},
+            {
+                "id": "S5",
+                "backends": ["b2"],
+            },
         ]

     def test_get_all_metadata_common_collections_merging(
@@ -201,6 +211,7 @@ def test_get_all_metadata_common_collections_merging(
                 "license": "various",
                 "providers": [{"name": "ESA", "roles": ["producer"]}, {"name": "ESA", "roles": ["licensor"]}],
                 "summaries": {"provider:backend": ["b1", "b2"]},
+                "backends": ["b1", "b2"],
                 "links": [
                     {"rel": "license", "href": "https://spdx.org/licenses/MIT.html"},
                     {"rel": "license", "href": "https://spdx.org/licenses/Apache-1.0.html"},
@@ -253,6 +264,7 @@ def test_get_collection_metadata_merging(self, multi_backend_connection, backend
             "extent": {"spatial": {"bbox": [[-180, -90, 180, 90]]}, "temporal": {"interval": [[None, None]]}},
             "license": "proprietary",
             "summaries": {"provider:backend": ["b1", "b2"]},
+            "backends": ["b1", "b2"],
             "links": [],
         }

@@ -274,6 +286,7 @@ def test_get_collection_metadata_merging_with_error(
             "extent": {"spatial": {"bbox": [[-180, -90, 180, 90]]}, "temporal": {"interval": [[None, None]]}},
             "license": "proprietary",
             "summaries": {"provider:backend": ["b2"]},
+            "backends": ["b2"],
             "links": [],
         }
         # TODO: test that caching of result is different from merging without error? (#2)
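With the field in place, a consumer of the aggregated listing can tell at a glance which backends serve each collection. A hypothetical client-side snippet, not part of this commit, using the expected values from test_get_all_metadata_simple above:

# Group collection ids per backend, based on the new "backends" field.
metadata = [
    {"id": "S2", "backends": ["b1"]},
    {"id": "S3", "backends": ["b2"]},
]
per_backend: dict = {}
for collection in metadata:
    for bid in collection["backends"]:
        per_backend.setdefault(bid, []).append(collection["id"])
print(per_backend)  # {'b1': ['S2'], 'b2': ['S3']}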
