From 7ce8a6ff8bebf0e9936449f3b3383672fed901ad Mon Sep 17 00:00:00 2001
From: Salman Toor
Date: Sun, 18 Jun 2023 11:22:13 +0000
Subject: [PATCH 1/2] Now we have both Keras and PyTorch examples

---
 Power-consumption-keras/Dockerfile          |  16 ++
 Power-consumption-keras/bin/build.sh        |   8 +
 Power-consumption-keras/bin/get_data        |  21 ++
 Power-consumption-keras/bin/init_venv.sh    |  10 +
 Power-consumption-keras/bin/split_data      |  42 ++++
 Power-consumption-keras/client/entrypoint   | 131 ++++++++++++
 Power-consumption-keras/client/fedn.yaml    |   5 +
 Power-consumption-keras/requirements.txt    |   3 +
 Power-consumption-pytorch/Dockerfile        |  16 ++
 Power-consumption-pytorch/bin/build.sh      |   8 +
 Power-consumption-pytorch/bin/get_data      |  21 ++
 Power-consumption-pytorch/bin/init_venv.sh  |  10 +
 Power-consumption-pytorch/bin/split_data    |  42 ++++
 Power-consumption-pytorch/client/entrypoint | 219 ++++++++++++++++++++
 Power-consumption-pytorch/client/fedn.yaml  |   5 +
 Power-consumption-pytorch/requirements.txt  |   4 +
 16 files changed, 561 insertions(+)
 create mode 100644 Power-consumption-keras/Dockerfile
 create mode 100755 Power-consumption-keras/bin/build.sh
 create mode 100755 Power-consumption-keras/bin/get_data
 create mode 100755 Power-consumption-keras/bin/init_venv.sh
 create mode 100755 Power-consumption-keras/bin/split_data
 create mode 100755 Power-consumption-keras/client/entrypoint
 create mode 100644 Power-consumption-keras/client/fedn.yaml
 create mode 100644 Power-consumption-keras/requirements.txt
 create mode 100644 Power-consumption-pytorch/Dockerfile
 create mode 100755 Power-consumption-pytorch/bin/build.sh
 create mode 100755 Power-consumption-pytorch/bin/get_data
 create mode 100755 Power-consumption-pytorch/bin/init_venv.sh
 create mode 100755 Power-consumption-pytorch/bin/split_data
 create mode 100755 Power-consumption-pytorch/client/entrypoint
 create mode 100644 Power-consumption-pytorch/client/fedn.yaml
 create mode 100644 Power-consumption-pytorch/requirements.txt

diff --git a/Power-consumption-keras/Dockerfile b/Power-consumption-keras/Dockerfile
new file mode 100644
index 0000000..e953af6
--- /dev/null
+++ b/Power-consumption-keras/Dockerfile
@@ -0,0 +1,16 @@
+FROM python:3.10.6-slim as base
+LABEL maintainer="salman@scaleoutsystems.com"
+WORKDIR /app
+COPY requirements.txt .
+RUN apt-get update \
+    && apt-get install --no-install-recommends -y git \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/* \
+    && pip install git+https://github.com/scaleoutsystems/fedn.git@master#egg=fedn\&subdirectory=fedn \
+    && pip install --no-cache-dir -r requirements.txt
+
+
+FROM python:3.10.6-slim as build
+COPY --from=base /usr/local/lib/python3.10/site-packages/ /usr/local/lib/python3.10/site-packages/
+COPY --from=base /usr/local/bin/fedn /usr/local/bin/
+WORKDIR /app
diff --git a/Power-consumption-keras/bin/build.sh b/Power-consumption-keras/bin/build.sh
new file mode 100755
index 0000000..44eda61
--- /dev/null
+++ b/Power-consumption-keras/bin/build.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -e
+
+# Init seed
+client/entrypoint init_seed
+
+# Make compute package
+tar -czvf package.tgz client
diff --git a/Power-consumption-keras/bin/get_data b/Power-consumption-keras/bin/get_data
new file mode 100755
index 0000000..4c449d0
--- /dev/null
+++ b/Power-consumption-keras/bin/get_data
@@ -0,0 +1,21 @@
+#!./.power-consumption-keras/bin/python
+import os
+
+import fire
+import numpy as np
+import tensorflow as tf
+
+
+def get_data(out_dir='data'):
+    # Make dir if necessary
+    if not os.path.exists(out_dir):
+        os.mkdir(out_dir)
+
+    # Download data
+    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
+    np.savez(f'{out_dir}/mnist.npz', x_train=x_train,
+             y_train=y_train, x_test=x_test, y_test=y_test)
+
+
+if __name__ == '__main__':
+    fire.Fire(get_data)
diff --git a/Power-consumption-keras/bin/init_venv.sh b/Power-consumption-keras/bin/init_venv.sh
new file mode 100755
index 0000000..617b9cb
--- /dev/null
+++ b/Power-consumption-keras/bin/init_venv.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+
+# Init venv
+python -m venv .power-consumption-keras
+
+# Pip deps
+.power-consumption-keras/bin/pip install --upgrade pip
+.power-consumption-keras/bin/pip install -e ../../fedn
+.power-consumption-keras/bin/pip install -r requirements.txt
diff --git a/Power-consumption-keras/bin/split_data b/Power-consumption-keras/bin/split_data
new file mode 100755
index 0000000..bb583b6
--- /dev/null
+++ b/Power-consumption-keras/bin/split_data
@@ -0,0 +1,42 @@
+#!./.power-consumption-keras/bin/python
+import os
+from math import floor
+
+import fire
+import numpy as np
+
+
+def splitset(dataset, parts):
+    n = dataset.shape[0]
+    local_n = floor(n/parts)
+    result = []
+    for i in range(parts):
+        result.append(dataset[i*local_n: (i+1)*local_n])
+    return np.array(result)
+
+
+def split(dataset='data/mnist.npz', outdir='data', n_splits=2):
+    # Load and convert to dict
+    package = np.load(dataset)
+    data = {}
+    for key, val in package.items():
+        data[key] = splitset(val, n_splits)
+
+    # Make dir if necessary
+    if not os.path.exists(f'{outdir}/clients'):
+        os.mkdir(f'{outdir}/clients')
+
+    # Make splits
+    for i in range(n_splits):
+        subdir = f'{outdir}/clients/{str(i+1)}'
+        if not os.path.exists(subdir):
+            os.mkdir(subdir)
+        np.savez(f'{subdir}/mnist.npz',
+                 x_train=data['x_train'][i],
+                 y_train=data['y_train'][i],
+                 x_test=data['x_test'][i],
+                 y_test=data['y_test'][i])
+
+
+if __name__ == '__main__':
+    fire.Fire(split)
diff --git a/Power-consumption-keras/client/entrypoint b/Power-consumption-keras/client/entrypoint
new file mode 100755
index 0000000..a04ba58
--- /dev/null
+++ b/Power-consumption-keras/client/entrypoint
@@ -0,0 +1,131 @@
+#!./.power-consumption-keras/bin/python
+
+import json
+import os
+
+import docker
+import fire
+import numpy as np
+import tensorflow as tf
+
+from fedn.utils.kerashelper import KerasHelper
+
+
+
+def _get_data_path():
+    # Figure out FEDn client number from container name
+    client = docker.from_env()
+    container = client.containers.get(os.environ['HOSTNAME'])
+    number = container.name[-1]
+
+    # Return data path
+    return f"/var/data/clients/{number}/power.npz"
+
+
+def _compile_model():
+    # Fully-connected regression network: four input
+    # features mapped to a single continuous output
+
+    # Define model
+    opt = tf.keras.optimizers.SGD(learning_rate=0.0001)
+    model = tf.keras.models.Sequential()
+    model.add(tf.keras.layers.Dense(64, input_dim=4, activation="relu"))
+    model.add(tf.keras.layers.Dense(32, activation="relu"))
+    model.add(tf.keras.layers.Dense(1, activation="linear"))
+    # Compile with MSE loss; track MAE as an extra metric
+    model.compile(loss="mse", optimizer=opt, metrics=['mae'])
+
+    return model
+
+
+def _load_data(data_path, is_train=True):
+    # Load data
+    if data_path is None:
+        data = np.load(_get_data_path())
+    else:
+        data = np.load(data_path)
+
+    if is_train:
+        X = data['x_train']
+        y = data['y_train']
+    else:
+        X = data['x_test']
+        y = data['y_test']
+
+    return X, y
+
+
+def init_seed(out_path='seed.npz'):
+    weights = _compile_model().get_weights()
+    helper = KerasHelper()
+    helper.save_model(weights, out_path)
+
+
+def train(in_model_path, out_model_path, data_path=None, batch_size=500, epochs=1):
+    # Load data
+    x_train, y_train = _load_data(data_path)
+
+    # Load model
+    model = _compile_model()
+    helper = KerasHelper()
+    weights = helper.load_model(in_model_path)
+    model.set_weights(weights)
+
+    # Train
+    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)
+
+    # Save
+    weights = model.get_weights()
+    helper.save_model(weights, out_model_path)
+
+
+def validate(in_model_path, out_json_path, data_path=None):
+    # Load data
+    x_train, y_train = _load_data(data_path)
+    x_test, y_test = _load_data(data_path, is_train=False)
+
+    # Load model
+    model = _compile_model()
+    helper = KerasHelper()
+    weights = helper.load_model(in_model_path)
+    model.set_weights(weights)
+
+    # Evaluate on the held-out test split. Predictions are
+    # computed once and reused for both error metrics:
+    # MAE reports the mean absolute deviation of the
+    # forecast, while MSE penalises large errors more
+    # strongly.
+
+    # Predict; flatten the (n, 1) output to match the shape of y_test
+
+    y_pred = model.predict(x_test).flatten()
+    mae_loss = tf.keras.losses.mean_absolute_error(y_test, y_pred).numpy()
+
+    # Mean absolute error over the test set
+    test_mae_loss = np.mean(mae_loss)
+
+    # Reuse the same predictions for the squared error
+    mse_loss = tf.keras.losses.mean_squared_error(y_test, y_pred).numpy()
+
+    # Mean squared error over the test set
+    test_mse_loss = np.mean(mse_loss)
+
+    # JSON schema
+    report = {
+        "test_mae": str(test_mae_loss),
+        "test_mse": str(test_mse_loss),
+    }
+
+    # Save JSON
+    with open(out_json_path, "w") as fh:
+        fh.write(json.dumps(report))
+
+
+if __name__ == '__main__':
+    fire.Fire({
+        'init_seed': init_seed,
+        'train': train,
+        'validate': validate,
+        '_get_data_path': _get_data_path,  # for testing
+    })
+
diff --git a/Power-consumption-keras/client/fedn.yaml b/Power-consumption-keras/client/fedn.yaml
new file mode 100644
index 0000000..f2e014a
--- /dev/null
+++ b/Power-consumption-keras/client/fedn.yaml
@@ -0,0 +1,5 @@
+entry_points:
+  train:
+    command: python entrypoint train $ENTRYPOINT_OPTS
+  validate:
+    command: python entrypoint validate $ENTRYPOINT_OPTS
diff --git a/Power-consumption-keras/requirements.txt b/Power-consumption-keras/requirements.txt
new file mode 100644
index 0000000..5a08a31
--- /dev/null
+++ b/Power-consumption-keras/requirements.txt
@@ -0,0 +1,3 @@
+tensorflow==2.8.0
+fire==0.3.1
+docker==5.0.2
\ No newline at end of file
diff --git a/Power-consumption-pytorch/Dockerfile b/Power-consumption-pytorch/Dockerfile
new file mode 100644
index 0000000..e953af6
--- /dev/null
+++ b/Power-consumption-pytorch/Dockerfile
@@ -0,0 +1,16 @@
+FROM python:3.10.6-slim as base
+LABEL maintainer="salman@scaleoutsystems.com"
+WORKDIR /app
+COPY requirements.txt .
+RUN apt-get update \
+    && apt-get install --no-install-recommends -y git \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/* \
+    && pip install git+https://github.com/scaleoutsystems/fedn.git@master#egg=fedn\&subdirectory=fedn \
+    && pip install --no-cache-dir -r requirements.txt
+
+
+FROM python:3.10.6-slim as build
+COPY --from=base /usr/local/lib/python3.10/site-packages/ /usr/local/lib/python3.10/site-packages/
+COPY --from=base /usr/local/bin/fedn /usr/local/bin/
+WORKDIR /app
diff --git a/Power-consumption-pytorch/bin/build.sh b/Power-consumption-pytorch/bin/build.sh
new file mode 100755
index 0000000..44eda61
--- /dev/null
+++ b/Power-consumption-pytorch/bin/build.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -e
+
+# Init seed
+client/entrypoint init_seed
+
+# Make compute package
+tar -czvf package.tgz client
diff --git a/Power-consumption-pytorch/bin/get_data b/Power-consumption-pytorch/bin/get_data
new file mode 100755
index 0000000..4c449d0
--- /dev/null
+++ b/Power-consumption-pytorch/bin/get_data
@@ -0,0 +1,21 @@
+#!./.power-consumption-pytorch/bin/python
+import os
+
+import fire
+import numpy as np
+from torchvision import datasets
+
+
+def get_data(out_dir='data'):
+    # Make dir if necessary
+    if not os.path.exists(out_dir):
+        os.mkdir(out_dir)
+
+    # Download via torchvision so the PyTorch client needs no TensorFlow
+    train = datasets.MNIST(root=out_dir, train=True, download=True)
+    test = datasets.MNIST(root=out_dir, train=False, download=True)
+    np.savez(f'{out_dir}/mnist.npz', x_train=train.data.numpy(), y_train=train.targets.numpy(), x_test=test.data.numpy(), y_test=test.targets.numpy())
+
+
+if __name__ == '__main__':
+    fire.Fire(get_data)
diff --git a/Power-consumption-pytorch/bin/init_venv.sh b/Power-consumption-pytorch/bin/init_venv.sh
new file mode 100755
index 0000000..ad4155c
--- /dev/null
+++ b/Power-consumption-pytorch/bin/init_venv.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+
+# Init venv
+python -m venv .power-consumption-pytorch
+
+# Pip deps
+.power-consumption-pytorch/bin/pip install --upgrade pip
+.power-consumption-pytorch/bin/pip install -e ../../fedn
+.power-consumption-pytorch/bin/pip install -r requirements.txt --no-cache-dir
diff --git a/Power-consumption-pytorch/bin/split_data b/Power-consumption-pytorch/bin/split_data
new file mode 100755
index 0000000..bb583b6
--- /dev/null
+++ b/Power-consumption-pytorch/bin/split_data
@@ -0,0 +1,42 @@
+#!./.power-consumption-pytorch/bin/python
+import os
+from math import floor
+
+import fire
+import numpy as np
+
+
+def splitset(dataset, parts):
+    n = dataset.shape[0]
+    local_n = floor(n/parts)
+    result = []
+    for i in range(parts):
+        result.append(dataset[i*local_n: (i+1)*local_n])
+    return np.array(result)
+
+
+def split(dataset='data/mnist.npz', outdir='data', n_splits=2):
+    # Load and convert to dict
+    package = np.load(dataset)
+    data = {}
+    for key, val in package.items():
+        data[key] = splitset(val, n_splits)
+
+    # Make dir if necessary
+    if not os.path.exists(f'{outdir}/clients'):
+        os.mkdir(f'{outdir}/clients')
+
+    # Make splits
+    for i in range(n_splits):
+        subdir = f'{outdir}/clients/{str(i+1)}'
+        if not os.path.exists(subdir):
+            os.mkdir(subdir)
+        np.savez(f'{subdir}/mnist.npz',
+                 x_train=data['x_train'][i],
+                 y_train=data['y_train'][i],
+                 x_test=data['x_test'][i],
+                 y_test=data['y_test'][i])
+
+
+if __name__ == '__main__':
+    fire.Fire(split)
diff --git a/Power-consumption-pytorch/client/entrypoint b/Power-consumption-pytorch/client/entrypoint
new file mode 100755
index 0000000..e72a810
--- /dev/null
+++ b/Power-consumption-pytorch/client/entrypoint
@@ -0,0 +1,219 @@
+#!./.power-consumption-pytorch/bin/python
+
+import collections
+import json
+import math
+import os
+
+import docker
+import fire
+import numpy as np
+import torch
+from fedn.utils.pytorchhelper import PytorchHelper
+from torch.nn import Linear
+from torch.nn import ReLU
+
+from torch.nn.init import kaiming_uniform_
+from torch.nn.init import xavier_uniform_
+
+def _get_data_path():
+    # Figure out FEDn client number from container name
+    client = docker.from_env()
+    container = client.containers.get(os.environ['HOSTNAME'])
+    number = container.name[-1]
+
+    # Return data path
+    return f"/var/data/clients/{number}/power.npz"
+
+def _compile_model():
+
+
+    class Net(torch.nn.Module):
+        def __init__(self):
+            super(Net, self).__init__()
+
+            self.hidden1 = torch.nn.Linear(4, 64)
+            kaiming_uniform_(self.hidden1.weight, nonlinearity='relu')
+            self.act1 = ReLU()
+
+            self.hidden2 = Linear(64, 32)
+            kaiming_uniform_(self.hidden2.weight, nonlinearity='relu')
+            self.act2 = ReLU()
+
+            self.hidden3 = Linear(32, 1)
+            xavier_uniform_(self.hidden3.weight)
+
+
+        def forward(self, x):
+
+            # input to first hidden layer
+            x = self.hidden1(x)
+            x = self.act1(x)
+
+            # second hidden layer
+            x = self.hidden2(x)
+            x = self.act2(x)
+
+            # third hidden layer and output
+            x = self.hidden3(x)
+            # no activation here: linear output for regression
+
+            return x
+
+    # Return model
+    return Net()
+
+
+def _load_model(model_path):
+
+    # Convert numpy weights from the FEDn helper back into tensors
+    helper = PytorchHelper()
+    weights_np = helper.load_model(model_path)
+    weights = collections.OrderedDict()
+    for w in weights_np:
+        weights[w] = torch.tensor(weights_np[w])
+    model = _compile_model()
+    model.load_state_dict(weights)
+    model.eval()
+    return model
+
+
+def _load_data(data_path, is_train=True):
+    # Load data
+    if data_path is None:
+        data = np.load(_get_data_path())
+    else:
+        data = np.load(data_path)
+
+    if is_train:
+        X = data['x_train']
+        y = data['y_train']
+    else:
+        X = data['x_test']
+        y = data['y_test']
+
+    return X, y
+
+
+def init_seed(out_path='seed.npz'):
+
+    # Init and save
+    model = _compile_model()
+    _save_model(model, out_path)
+
+def _save_model(model, out_path):
+
+    # Convert tensors to numpy arrays for the FEDn helper format
+    weights = model.state_dict()
+    weights_np = collections.OrderedDict()
+    for w in weights:
+        weights_np[w] = weights[w].cpu().detach().numpy()
+    helper = PytorchHelper()
+    helper.save_model(weights_np, out_path)
+
+
+def train(in_model_path, out_model_path, data_path=None, batch_size=500, epochs=1, lr=0.001):
+    # Load data
+    x_train, y_train = _load_data(data_path)
+
+    # Load model
+    model = _load_model(in_model_path)
+
+    # Train
+    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
+    # L1 loss matches the MAE metric reported by validate
+
+    print('len(x_train): ', len(x_train))
+
+    n_batches = int(math.ceil(len(x_train) / batch_size))
+    print('n_batches:', n_batches)
+    criterion = torch.nn.L1Loss()
+
+    print('model architecture: ', model)
+
+
+    for e in range(epochs):  # epoch loop
+        for b in range(n_batches):  # batch loop
+            # Retrieve current batch
+            batch_x_tmp = torch.from_numpy(x_train[b * batch_size:(b + 1) * batch_size])
+            batch_x = batch_x_tmp.float()
+
+            batch_y_tmp = torch.from_numpy(np.expand_dims(y_train[b * batch_size:(b + 1) * batch_size], -1))
+            batch_y = batch_y_tmp.float()
+
+            # Train on batch; batch_x has shape (batch, 4)
+            # and batch_y has shape (batch, 1)
+            optimizer.zero_grad()
+            outputs = model(batch_x)
+
+            loss = criterion(outputs, batch_y)
+            loss.backward()
+            optimizer.step()
+            # Log
+            if b % 100 == 0:
+                print(
+                    f"Epoch {e}/{epochs-1} | Batch: {b}/{n_batches-1} | Loss: {loss.item()}")
+
+
+    # Save
+    _save_model(model, out_model_path)
+
+def validate(in_model_path, out_json_path, data_path=None):
+    # Load data
+    x_train, y_train = _load_data(data_path)
+    x_test, y_test = _load_data(data_path, is_train=False)
+
+    # Load model
+    model = _load_model(in_model_path)
+
+    # Evaluate both splits without tracking gradients
+    criterion_mae = torch.nn.L1Loss()
+    criterion_mse = torch.nn.MSELoss()
+    with torch.no_grad():
+
+        x_train_t = torch.tensor(x_train, dtype=torch.float32)
+        train_out = model(x_train_t)
+
+        y_train = np.expand_dims(y_train, -1)
+        y_train_t = torch.tensor(y_train, dtype=torch.float32)
+
+        training_loss_mae = criterion_mae(train_out, y_train_t)
+        training_loss_mse = criterion_mse(train_out, y_train_t)
+
+        x_test_t = torch.tensor(x_test, dtype=torch.float32)
+        test_out = model(x_test_t)
+
+        y_test = np.expand_dims(y_test, -1)
+        y_test_t = torch.tensor(y_test, dtype=torch.float32)
+
+        test_loss_mae = criterion_mae(test_out, y_test_t)
+        test_loss_mse = criterion_mse(test_out, y_test_t)
+
+    # Report errors on both splits so that client overfitting
+    # is visible on the server side; values are cast to str to
+    # keep the report JSON-serialisable, matching the Keras
+    # example.
+
+    # JSON schema
+    report = {
+        "test_mae": str(test_loss_mae.item()),
+        "test_mse": str(test_loss_mse.item()),
+        "training_mae": str(training_loss_mae.item()),
+        "training_mse": str(training_loss_mse.item()),
+    }
+
+
+    # Save JSON
+    with open(out_json_path, "w") as fh:
+        fh.write(json.dumps(report))
+
+
+if __name__ == '__main__':
+    fire.Fire({
+        'init_seed': init_seed,
+        'train': train,
+        'validate': validate,
+        '_get_data_path': _get_data_path,  # for testing
+    })
+
diff --git a/Power-consumption-pytorch/client/fedn.yaml b/Power-consumption-pytorch/client/fedn.yaml
new file mode 100644
index 0000000..f2e014a
--- /dev/null
+++ b/Power-consumption-pytorch/client/fedn.yaml
@@ -0,0 +1,5 @@
+entry_points:
+  train:
+    command: python entrypoint train $ENTRYPOINT_OPTS
+  validate:
+    command: python entrypoint validate $ENTRYPOINT_OPTS
diff --git a/Power-consumption-pytorch/requirements.txt b/Power-consumption-pytorch/requirements.txt
new file mode 100644
index 0000000..0bf7a6e
--- /dev/null
+++ b/Power-consumption-pytorch/requirements.txt
@@ -0,0 +1,4 @@
+torch==1.13.1
+torchvision==0.14.1
+fire==0.3.1
+docker==6.1.1

From d7b226516e0a1d26c903f6e294883de342729dc7 Mon Sep 17 00:00:00 2001
From: Salman Toor
Date: Mon, 19 Jun 2023 09:26:42 +0000
Subject: [PATCH 2/2] Remove extra files

---
 Dockerfile        |  16 ------
 bin/build.sh      |   8 ---
 bin/get_data      |  21 --------
 bin/init_venv.sh  |  10 ----
 bin/split_data    |  42 ---------------
 client/entrypoint | 131 ----------------------------------------------
 client/fedn.yaml  |   5 --
 requirements.txt  |   3 ---
 8 files changed, 236 deletions(-)
 delete mode 100644 Dockerfile
 delete mode 100755 bin/build.sh
 delete mode 100755 bin/get_data
 delete mode 100755 bin/init_venv.sh
 delete mode 100755 bin/split_data
 delete mode 100755 client/entrypoint
 delete mode 100644 client/fedn.yaml
 delete mode 100644 requirements.txt

diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index e953af6..0000000
--- a/Dockerfile
+++ /dev/null
@@ -1,16 +0,0 @@
-FROM python:3.10.6-slim as base
-LABEL maintainer="salman@scaleoutsystems.com"
-WORKDIR /app
-COPY requirements.txt .
-RUN apt-get update \
-    && apt-get install --no-install-recommends -y git \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/* \
-    && pip install git+https://github.com/scaleoutsystems/fedn.git@master#egg=fedn\&subdirectory=fedn \
-    && pip install --no-cache-dir -r requirements.txt
-
-
-FROM python:3.10.6-slim as build
-COPY --from=base /usr/local/lib/python3.10/site-packages/ /usr/local/lib/python3.10/site-packages/
-COPY --from=base /usr/local/bin/fedn /usr/local/bin/
-WORKDIR /app
diff --git a/bin/build.sh b/bin/build.sh
deleted file mode 100755
index 44eda61..0000000
--- a/bin/build.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-set -e
-
-# Init seed
-client/entrypoint init_seed
-
-# Make compute package
-tar -czvf package.tgz client
diff --git a/bin/get_data b/bin/get_data
deleted file mode 100755
index 4c449d0..0000000
--- a/bin/get_data
+++ /dev/null
@@ -1,21 +0,0 @@
-#!./.mnist-keras/bin/python
-import os
-
-import fire
-import numpy as np
-import tensorflow as tf
-
-
-def get_data(out_dir='data'):
-    # Make dir if necessary
-    if not os.path.exists(out_dir):
-        os.mkdir(out_dir)
-
-    # Download data
-    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
-    np.savez(f'{out_dir}/mnist.npz', x_train=x_train,
-             y_train=y_train, x_test=x_test, y_test=y_test)
-
-
-if __name__ == '__main__':
-    fire.Fire(get_data)
diff --git a/bin/init_venv.sh b/bin/init_venv.sh
deleted file mode 100755
index 617b9cb..0000000
--- a/bin/init_venv.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-set -e
-
-# Init venv
-python -m venv .power-consumption-keras
-
-# Pip deps
-.power-consumption-keras/bin/pip install --upgrade pip
-.power-consumption-keras/bin/pip install -e ../../fedn
-.power-consumption-keras/bin/pip install -r requirements.txt
diff --git a/bin/split_data b/bin/split_data
deleted file mode 100755
index bb583b6..0000000
--- a/bin/split_data
+++ /dev/null
@@ -1,42 +0,0 @@
-#!./.mnist-keras/bin/python
-import os
-from math import floor
-
-import fire
-import numpy as np
-
-
-def splitset(dataset, parts):
-    n = dataset.shape[0]
-    local_n = floor(n/parts)
-    result = []
-    for i in range(parts):
-        result.append(dataset[i*local_n: (i+1)*local_n])
-    return np.array(result)
-
-
-def split(dataset='data/mnist.npz', outdir='data', n_splits=2):
-    # Load and convert to dict
-    package = np.load(dataset)
-    data = {}
-    for key, val in package.items():
-        data[key] = splitset(val, n_splits)
-
-    # Make dir if necessary
-    if not os.path.exists(f'{outdir}/clients'):
-        os.mkdir(f'{outdir}/clients')
-
-    # Make splits
-    for i in range(n_splits):
-        subdir = f'{outdir}/clients/{str(i+1)}'
-        if not os.path.exists(subdir):
-            os.mkdir(subdir)
-        np.savez(f'{subdir}/mnist.npz',
-                 x_train=data['x_train'][i],
-                 y_train=data['y_train'][i],
-                 x_test=data['x_test'][i],
-                 y_test=data['y_test'][i])
-
-
-if __name__ == '__main__':
-    fire.Fire(split)
diff --git a/client/entrypoint b/client/entrypoint
deleted file mode 100755
index a04ba58..0000000
--- a/client/entrypoint
+++ /dev/null
@@ -1,131 +0,0 @@
-#!./.power-consumption-keras/bin/python
-
-import json
-import os
-
-import docker
-import fire
-import numpy as np
-import tensorflow as tf
-
-from fedn.utils.kerashelper import KerasHelper
-
-
-
-def _get_data_path():
-    # Figure out FEDn client number from container name
-    client = docker.from_env()
-    container = client.containers.get(os.environ['HOSTNAME'])
-    number = container.name[-1]
-
-    # Return data path
-    return f"/var/data/clients/{number}/power.npz"
-
-
-def _compile_model(img_rows=28, img_cols=28):
-    # Set input shape
-    #input_shape = (img_rows, img_cols, 1)
-
-    # Define model
-    opt = tf.keras.optimizers.SGD(lr=0.0001)
-    model = tf.keras.models.Sequential()
-    model.add(tf.keras.layers.Dense(64, input_dim=4, activation="relu"))
-    model.add(tf.keras.layers.Dense(32, activation="relu"))
-    model.add(tf.keras.layers.Dense(1, activation="linear"))
-    #model.summary()
-    model.compile(loss = "mse", optimizer = opt,metrics=['mae'])
-
-    return model
-
-
-def _load_data(data_path, is_train=True):
-    # Load data
-    if data_path is None:
-        data = np.load(_get_data_path())
-    else:
-        data = np.load(data_path)
-
-    if is_train:
-        X = data['x_train']
-        y = data['y_train']
-    else:
-        X = data['x_test']
-        y = data['y_test']
-
-    return X, y
-
-
-def init_seed(out_path='seed.npz'):
-    weights = _compile_model().get_weights()
-    helper = KerasHelper()
-    helper.save_model(weights, out_path)
-
-
-def train(in_model_path, out_model_path, data_path=None, batch_size=500, epochs=1):
-    # Load data
-    x_train, y_train = _load_data(data_path)
-
-    # Load model
-    model = _compile_model()
-    helper = KerasHelper()
-    weights = helper.load_model(in_model_path)
-    model.set_weights(weights)
-
-    # Train
-    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)
-
-    # Save
-    weights = model.get_weights()
-    helper.save_model(weights, out_model_path)
-
-
-def validate(in_model_path, out_json_path, data_path=None):
-    # Load data
-    x_train, y_train = _load_data(data_path)
-    x_test, y_test = _load_data(data_path, is_train=False)
-
-    # Load model
-    model = _compile_model()
-    helper = KerasHelper()
-    weights = helper.load_model(in_model_path)
-    model.set_weights(weights)
-
-    # Evaluate
-    #model_score = model.evaluate(x_train, y_train)
-    #model_score_test = model.evaluate(x_test, y_test)
-    #y_pred = model.predict(x_test)
-    #y_pred = np.argmax(y_pred, axis=1)
-
-    # Evaluate
-
-    y_pred = model.predict(x_test)
-    mae_loss = tf.keras.losses.mean_absolute_error(y_test, y_pred).numpy()
-
-    #print('mae_loss: ', np.mean(mae_loss))
-    test_mae_loss = np.mean(mae_loss)
-
-    y_pred = model.predict(x_test)
-    mse_loss = tf.keras.losses.mean_squared_error(y_test, y_pred).numpy()
-
-    #print('mse_loss: ', np.mean(mse_loss))
-    test_mse_loss = np.mean(mse_loss)
-
-    # JSON schema
-    report = {
-        "test_mae": str(test_mae_loss),
-        "test_mse": str(test_mse_loss),
-    }
-
-    # Save JSON
-    with open(out_json_path, "w") as fh:
-        fh.write(json.dumps(report))
-
-
-if __name__ == '__main__':
-    fire.Fire({
-        'init_seed': init_seed,
-        'train': train,
-        'validate': validate,
-        '_get_data_path': _get_data_path,  # for testing
-    })
-
diff --git a/client/fedn.yaml b/client/fedn.yaml
deleted file mode 100644
index f2e014a..0000000
--- a/client/fedn.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-entry_points:
-  train:
-    command: python entrypoint train $ENTRYPOINT_OPTS
-  validate:
-    command: python entrypoint validate $ENTRYPOINT_OPTS
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 5a08a31..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-tensorflow==2.8.0
-fire==0.3.1
-docker==5.0.2
\ No newline at end of file