Skip to content

Commit

Permalink
Merge branch 'master' into win-native
Browse files Browse the repository at this point in the history
  • Loading branch information
stefanhellander committed Nov 21, 2023
2 parents d5b2873 + 36cf7f7 commit e229fd0
Show file tree
Hide file tree
Showing 477 changed files with 10,847 additions and 8,111 deletions.
83 changes: 83 additions & 0 deletions .ci/tests/examples/api_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
import fire
import yaml

from fedn import APIClient


def _download_config(output):
    """ Fetch the client configuration from the controller and save it to disk.

    :param output: Path of the file to write the configuration to.
    :type output: str
    """
    api = APIClient(host="localhost", port=8092)
    client_config = api.get_client_config(checksum=True)
    serialized = yaml.dump(client_config)
    with open(output, 'w') as config_file:
        config_file.write(serialized)


def test_api_get_methods():
    """ Exercise the read-only API endpoints of the controller.

    Each endpoint must return a truthy value; the result is echoed to
    stdout so it shows up in the CI logs.
    """
    api = APIClient(host="localhost", port=8092)

    # (label, zero-argument callable) pairs, checked in order.
    checks = [
        ("Controller status", api.get_controller_status),
        ("Events", api.get_events),
        ("Validations", api.list_validations),
        ("Models", api.get_model_trail),
        ("Clients", api.list_clients),
        ("Combiners", api.list_combiners),
        ("Combiner", lambda: api.get_combiner("combiner")),
        ("First model", api.get_initial_model),
        ("Package", api.get_package),
        ("Checksum", api.get_package_checksum),
        ("Rounds", api.list_rounds),
        ("Round", lambda: api.get_round(1)),
        ("Sessions", api.list_sessions),
    ]
    for label, call in checks:
        result = call()
        assert result
        print(label + ": ", result, flush=True)


if __name__ == '__main__':
    # Expose selected API operations as CLI subcommands via python-fire.
    api = APIClient(host="localhost", port=8092)
    commands = {
        'set_seed': api.set_initial_model,
        'set_package': api.set_package,
        'start_session': api.start_session,
        'get_client_config': _download_config,
        'test_api_get_methods': test_api_get_methods,
    }
    fire.Fire(commands)
2 changes: 1 addition & 1 deletion .ci/tests/examples/configure.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ set -e

# Parse example name
if [ "$#" -ne 2 ]; then
>&2 echo "Wrong number of arguments (usage: run.sh <example-name>)"
>&2 echo "Wrong number of arguments (usage: configure.sh <example-name>)"
exit 1
fi
example="$1"
Expand Down
35 changes: 35 additions & 0 deletions .ci/tests/examples/inference_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import sys
from time import sleep

import pymongo

N_CLIENTS = 2
RETRIES = 18
SLEEP = 10


def _eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)


def _wait_n_rounds(collection):
    """ Poll *collection* until N_CLIENTS documents of type 'INFERENCE' exist.

    Retries up to RETRIES times, sleeping SLEEP seconds between attempts.

    :param collection: pymongo collection holding client status documents.
    :return: the number of 'INFERENCE' documents found; may be less than
        N_CLIENTS if the retries are exhausted.
    """
    n = 0
    query = {'type': 'INFERENCE'}  # loop-invariant, hoisted out of the retry loop
    for _ in range(RETRIES):
        n = collection.count_documents(query)
        if n == N_CLIENTS:
            return n
        # Fixed misspelled progress message ('Succeded cleints').
        _eprint(f'Succeeded clients: {n}. Sleeping for {SLEEP}.')
        sleep(SLEEP)
    _eprint(f'Succeeded clients: {n}. Giving up.')
    return n


if __name__ == '__main__':
    # Connect to the MongoDB instance exposed by the FEDn test network.
    client = pymongo.MongoClient("mongodb://fedn_admin:password@localhost:6534")

    # Block until all expected clients have reported a successful inference.
    succeeded = _wait_n_rounds(client['fedn-test-network']['control']['status'])
    # check that all clients succeeded (fixed misspelled 'Succeded' and
    # parenthesized-tuple-style assert)
    assert succeeded == N_CLIENTS, f'Only {succeeded} of {N_CLIENTS} clients succeeded.'
    _eprint(f'Succeeded inference clients: {succeeded}. Test passed.')
7 changes: 5 additions & 2 deletions .ci/tests/examples/print_logs.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,11 @@ docker logs "$(basename $PWD)_minio_1"
echo "Mongo logs"
docker logs "$(basename $PWD)_mongo_1"

echo "Reducer logs"
docker logs "$(basename $PWD)_reducer_1"
echo "Dashboard logs"
docker logs "$(basename $PWD)_dashboard_1"

echo "API-Server logs"
docker logs "$(basename $PWD)_api-server_1"

echo "Combiner logs"
docker logs "$(basename $PWD)_combiner_1"
Expand Down
24 changes: 8 additions & 16 deletions .ci/tests/examples/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -23,34 +23,23 @@ docker-compose \
".$example/bin/python" ../../.ci/tests/examples/wait_for.py combiners

>&2 echo "Upload compute package"
curl -k -X POST \
-F [email protected] \
-F helper="$helper" \
http://localhost:8090/context
printf '\n'
".$example/bin/python" ../../.ci/tests/examples/api_test.py set_package --path package.tgz --helper "$helper"

>&2 echo "Upload seed"
curl -k -X POST \
-F [email protected] \
http://localhost:8090/models
printf '\n'
".$example/bin/python" ../../.ci/tests/examples/api_test.py set_seed --path seed.npz

>&2 echo "Wait for clients to connect"
".$example/bin/python" ../../.ci/tests/examples/wait_for.py clients

>&2 echo "Start round"
curl -k -X POST \
-F rounds=3 \
-F validate=True \
http://localhost:8090/control
printf '\n'
>&2 echo "Start session"
".$example/bin/python" ../../.ci/tests/examples/api_test.py start_session --rounds 3 --helper "$helper"

>&2 echo "Checking rounds success"
".$example/bin/python" ../../.ci/tests/examples/wait_for.py rounds

>&2 echo "Test client connection with dowloaded settings"
# Get config
curl -k http://localhost:8090/config/download > ../../client.yaml
".$example/bin/python" ../../.ci/tests/examples/api_test.py get_client_config --output ../../client.yaml

# Redeploy clients with config
docker-compose \
Expand All @@ -62,5 +51,8 @@ docker-compose \
>&2 echo "Wait for clients to reconnect"
".$example/bin/python" ../../.ci/tests/examples/wait_for.py clients

>&2 echo "Test API GET requests"
".$example/bin/python" ../../.ci/tests/examples/api_test.py test_api_get_methods

popd
>&2 echo "Test completed successfully"
19 changes: 19 additions & 0 deletions .ci/tests/examples/run_inference.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
#!/bin/bash
set -e

# Parse example name; fixed misspelled script name in the usage message
# ('run_infrence.sh' -> 'run_inference.sh').
if [ "$#" -lt 1 ]; then
>&2 echo "Wrong number of arguments (usage: run_inference.sh <example-name>)"
exit 1
fi
example="$1"

>&2 echo "Run inference"
pushd "examples/$example"
# Trigger an inference round on the controller.
# NOTE(review): sibling scripts use http:// on port 8090 — confirm https is intended here.
curl -k -X POST https://localhost:8090/infer

>&2 echo "Checking inference success"
".$example/bin/python" ../../.ci/tests/examples/inference_test.py

>&2 echo "Test completed successfully"
popd
40 changes: 25 additions & 15 deletions .ci/tests/examples/wait_for.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ def _retry(try_func, **func_args):
for _ in range(RETRIES):
is_success = try_func(**func_args)
if is_success:
_eprint('Sucess.')
_eprint('Success.')
return True
_eprint(f'Sleeping for {SLEEP}.')
sleep(SLEEP)
Expand All @@ -29,29 +29,39 @@ def _retry(try_func, **func_args):
def _test_rounds(n_rounds):
client = pymongo.MongoClient(
"mongodb://fedn_admin:password@localhost:6534")
collection = client['fedn-test-network']['control']['round']
query = {'reducer.status': 'Success'}
collection = client['fedn-network']['control']['rounds']
query = {'status': 'Finished'}
n = collection.count_documents(query)
client.close()
_eprint(f'Succeded rounds: {n}.')
return n == n_rounds


def _test_nodes(n_nodes, node_type, reducer_host='localhost', reducer_port='8090'):
def _test_nodes(n_nodes, node_type, reducer_host='localhost', reducer_port='8092'):
try:
resp = requests.get(
f'http://{reducer_host}:{reducer_port}/netgraph', verify=False)

endpoint = "list_clients" if node_type == "client" else "list_combiners"

response = requests.get(
f'http://{reducer_host}:{reducer_port}/{endpoint}', verify=False)

if response.status_code == 200:

data = json.loads(response.content)

count = 0
if node_type == "client":
arr = data.get('result')
count = sum(element.get('status') == "online" for element in arr)
else:
count = data.get('count')

_eprint(f'Active {node_type}s: {count}.')
return count == n_nodes

except Exception as e:
_eprint(f'Reques exception econuntered: {e}.')
    _eprint(f'Request exception encountered: {e}.')
return False
if resp.status_code == 200:
gr = json.loads(resp.content)
n = sum(values.get('type') == node_type and values.get(
'status') == 'active' for values in gr['nodes'])
_eprint(f'Active {node_type}s: {n}.')
return n == n_nodes
_eprint(f'Reducer returned {resp.status_code}.')
return False


def rounds(n_rounds=3):
Expand Down
9 changes: 7 additions & 2 deletions .github/workflows/code-checks.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,27 +18,32 @@ jobs:
--skip .venv
--skip .mnist-keras
--skip .mnist-pytorch
--skip fedn_pb2.py
--skip fedn_pb2_grpc.py
- name: check Python formatting
run: >
.venv/bin/autopep8 --recursive --diff
--exclude .venv
--exclude .mnist-keras
--exclude .mnist-pytorch
--exclude fedn_pb2.py
--exclude fedn_pb2_grpc.py
.
- name: run Python linter
run: >
.venv/bin/flake8 .
--exclude ".venv,.mnist-keras,.mnist-pytorch,fedn_pb2.py"
--exclude ".venv,.mnist-keras,.mnist-pytorch,fedn_pb2.py,fedn_pb2_grpc.py"
- name: check for floating imports
run: >
! grep -E -R
--exclude-dir='.venv'
--exclude-dir='.mnist-pytorch'
--exclude-dir='.mnist-keras'
--exclude-dir='docs'
--exclude-dir='docs'
--exclude='tests.py'
'^[ \t]+(import|from) ' -I .
# TODO: add linting/formatting for all file types
10 changes: 6 additions & 4 deletions .github/workflows/integration-tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,12 @@ jobs:
strategy:
matrix:
to_test:
- "mnist-keras keras"
- "mnist-pytorch pytorch"
- "mnist-keras kerashelper"
- "mnist-pytorch pytorchhelper"
python_version: ["3.8", "3.9","3.10"]
os:
- ubuntu-20.04
- ubuntu-22.04
- macos-11
runs-on: ${{ matrix.os }}
steps:
- name: checkout
Expand All @@ -38,7 +37,10 @@ jobs:
- name: run ${{ matrix.to_test }}
run: .ci/tests/examples/run.sh ${{ matrix.to_test }}
if: ${{ matrix.os != 'macos-11' }} # skip Docker part for MacOS

- name: run ${{ matrix.to_test }} inference
run: .ci/tests/examples/run_inference.sh ${{ matrix.to_test }}
if: ${{ matrix.os != 'macos-11' && matrix.to_test == 'mnist-keras keras' }} # example available for Keras

- name: print logs
if: failure()
Expand Down
8 changes: 7 additions & 1 deletion .readthedocs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,10 @@ build:
python: "3.9"

sphinx:
configuration: docs/conf.py
configuration: docs/conf.py

python:
install:
- method: pip
path: ./fedn
- requirements: docs/requirements.txt
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# Base image
ARG BASE_IMG=python:3.9-slim
ARG BASE_IMG=python:3.10-slim
FROM $BASE_IMG

# Requirements (use MNIST Keras as default)
Expand Down
1 change: 1 addition & 0 deletions LICENSE
Original file line number Diff line number Diff line change
Expand Up @@ -199,3 +199,4 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ To connect a client that uses the data partition 'data/clients/1/mnist.pt':
-v $PWD/data/clients/1:/var/data \
-e ENTRYPOINT_OPTS=--data_path=/var/data/mnist.pt \
--network=fedn_default \
ghcr.io/scaleoutsystems/fedn/fedn:develop-mnist-pytorch run client -in client.yaml --name client1
ghcr.io/scaleoutsystems/fedn/fedn:master-mnist-pytorch run client -in client.yaml --name client1
You are now ready to start training the model at http://localhost:8090/control.

Expand Down
6 changes: 3 additions & 3 deletions config/settings-client.yaml.template
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
network_id: fedn-test-network
discover_host: reducer
discover_port: 8090
network_id: fedn-network
discover_host: api-server
discover_port: 8092
Loading

0 comments on commit e229fd0

Please sign in to comment.