Formatting and sorting imports in Python files #394

Merged: 5 commits, May 31, 2022
Changes from 3 commits
5 changes: 3 additions & 2 deletions .ci/tests/examples/is_success.py
@@ -1,6 +1,7 @@
import pymongo
from time import sleep
import sys
from time import sleep

import pymongo

N_ROUNDS = 3
RETRIES= 6
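The reordering above is what isort's default profile produces: standard-library imports come first, third-party packages follow after a blank line, and first-party code (here, the fedn package itself) forms a final group, each group sorted alphabetically. A rough illustration only, not part of this diff, and dependent on isort's default settings:

# Standard library, sorted alphabetically by module name
import json
import os
from time import sleep

# Third-party packages form the second group
import numpy as np
import pymongo

# First-party code (the fedn package) forms the last group
from fedn.utils.kerashelper import KerasHelper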
4 changes: 3 additions & 1 deletion .devcontainer/bin/init_venv.sh
@@ -9,5 +9,7 @@ python -m venv .venv
.venv/bin/pip install \
sphinx==4.4.0 \
sphinx_press_theme==0.8.0 \
sphinx-autobuild==2021.3.14
sphinx-autobuild==2021.3.14 \
autopep8==1.5.7 \
isort==5.10.1
.venv/bin/pip install -e fedn
3 changes: 2 additions & 1 deletion .devcontainer/devcontainer.json.tpl
@@ -9,7 +9,8 @@
"ms-azuretools.vscode-docker",
"ms-python.python",
"exiasr.hadolint",
"yzhang.markdown-all-in-one"
"yzhang.markdown-all-in-one",
"ms-python.isort"
],
"mounts": [
"source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind,consistency=default",
30 changes: 30 additions & 0 deletions .github/workflows/code-checks.yaml
@@ -0,0 +1,30 @@
name: "code checks"

on: push

jobs:
code-checks:
runs-on: ubuntu-20.04
steps:
- name: checkout
uses: actions/checkout@v2

- name: init venv
run: .devcontainer/bin/init_venv.sh

- name: check Python imports
run: >
.venv/bin/isort . --check --diff
--skip .venv
--skip .mnist-keras
--skip .mnist-pytorch

- name: check Python formatting
run: >
.venv/bin/autopep8 --recursive --diff
--exclude .venv
--exclude .mnist-keras
--exclude .mnist-pytorch
.

# TODO: add linting/formatting for all file types
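The two steps above only report problems: isort runs with --check --diff and autopep8 with --diff, so CI never rewrites files. For local experimentation, the same pinned tools also expose a small Python API; the sketch below is illustrative only and assumes the isort 5.x isort.code() and autopep8 1.x autopep8.fix_code() entry points:

import autopep8  # pinned to 1.5.7 in init_venv.sh
import isort     # pinned to 5.10.1 in init_venv.sh

# Input mirroring the kinds of issues this PR fixes: unsorted imports
# and missing whitespace around '='.
source = "from time import sleep\nimport sys\nRETRIES= 6\n"

source = isort.code(source)         # sort and group the imports
source = autopep8.fix_code(source)  # apply PEP 8 fixes such as E225 (spacing around operators)

print(source)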
6 changes: 6 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,6 @@
{
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true
},
}
1 change: 1 addition & 0 deletions docs/source/conf.py
@@ -12,6 +12,7 @@
#
import os
import sys

sys.path.insert(0, os.path.abspath('../../fedn'))


14 changes: 9 additions & 5 deletions examples/mnist-keras/bin/get_data
@@ -1,17 +1,21 @@
#!./.mnist-keras/bin/python
import os

import fire
import tensorflow as tf
import numpy as np
import os
import tensorflow as tf


def get_data(out_dir='data'):
# Make dir if necessary
if not os.path.exists(out_dir):
os.mkdir(out_dir)

# Download data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
np.savez(f'{out_dir}/mnist.npz', x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test)
np.savez(f'{out_dir}/mnist.npz', x_train=x_train,
y_train=y_train, x_test=x_test, y_test=y_test)


if __name__ == '__main__':
fire.Fire(get_data)
fire.Fire(get_data)
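The re-wrapped np.savez call above is autopep8 keeping lines under the pycodestyle length limit (E501) by continuing the argument list on a new line aligned with the opening parenthesis. A small self-contained sketch of that wrapping style, with dummy values, not part of the diff:

import numpy as np

x_train = y_train = x_test = y_test = np.zeros(1)  # dummy arrays for illustration

# Continuation line aligned with the opening parenthesis, as autopep8 formats it
np.savez('mnist_example.npz', x_train=x_train,
         y_train=y_train, x_test=x_test, y_test=y_test)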
17 changes: 10 additions & 7 deletions examples/mnist-keras/bin/split_data
@@ -1,8 +1,10 @@
#!./.mnist-keras/bin/python
import os
import numpy as np
from math import floor

import fire
import numpy as np


def splitset(dataset, parts):
n = dataset.shape[0]
@@ -26,14 +28,15 @@ def split(dataset='data/mnist.npz', outdir='data', n_splits=2):

# Make splits
for i in range(n_splits):
subdir=f'{outdir}/clients/{str(i+1)}'
subdir = f'{outdir}/clients/{str(i+1)}'
if not os.path.exists(subdir):
os.mkdir(subdir)
np.savez(f'{subdir}/mnist.npz',
x_train=data['x_train'][i],
y_train=data['y_train'][i],
x_test=data['x_test'][i],
y_test=data['y_test'][i])
x_train=data['x_train'][i],
y_train=data['y_train'][i],
x_test=data['x_test'][i],
y_test=data['y_test'][i])


if __name__ == '__main__':
fire.Fire(split)
fire.Fire(split)
43 changes: 26 additions & 17 deletions examples/mnist-keras/client/entrypoint
@@ -1,23 +1,27 @@
#!./.mnist-keras/bin/python
import tensorflow as tf
import numpy as np
from fedn.utils.kerashelper import KerasHelper
import fire
import json
import docker
import os

NUM_CLASSES=10
import docker
import fire
import numpy as np
import tensorflow as tf

from fedn.utils.kerashelper import KerasHelper

NUM_CLASSES = 10


def _get_data_path():
# Figure out FEDn client number from container name
client = docker.from_env()
container = client.containers.get(os.environ['HOSTNAME'])
number = container.name[-1]

# Return data path
return f"/var/data/clients/{number}/mnist.npz"


def _compile_model(img_rows=28, img_cols=28):
# Set input shape
input_shape = (img_rows, img_cols, 1)
@@ -30,10 +34,11 @@ def _compile_model(img_rows=28, img_cols=28):
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
return model


def _load_data(data_path, is_train=True):
# Load data
if data_path is None:
@@ -50,16 +55,18 @@ def _load_data(data_path, is_train=True):

# Normalize
X = X.astype('float32')
X = np.expand_dims(X,-1)
X = np.expand_dims(X, -1)
X = X / 255
y = tf.keras.utils.to_categorical(y, NUM_CLASSES)

return X, y


def init_seed(out_path='seed.npz'):
weights = _compile_model().get_weights()
helper = KerasHelper()
helper.save_model(weights, out_path)
weights = _compile_model().get_weights()
helper = KerasHelper()
helper.save_model(weights, out_path)


def train(in_model_path, out_model_path, data_path=None, batch_size=32, epochs=1):
# Load data
@@ -73,11 +80,12 @@ def train(in_model_path, out_model_path, data_path=None, batch_size=32, epochs=1

# Train
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)

# Save
weights = model.get_weights()
helper.save_model(weights, out_model_path)


def validate(in_model_path, out_json_path, data_path=None):
# Load data
x_train, y_train = _load_data(data_path)
@@ -104,13 +112,14 @@ def validate(in_model_path, out_json_path, data_path=None):
}

# Save JSON
with open(out_json_path,"w") as fh:
with open(out_json_path, "w") as fh:
fh.write(json.dumps(report))


if __name__ == '__main__':
fire.Fire({
'init_seed': init_seed,
'train': train,
'validate': validate,
'_get_data_path': _get_data_path, # for testing
})
'_get_data_path': _get_data_path, # for testing
})
17 changes: 11 additions & 6 deletions examples/mnist-pytorch/bin/get_data
@@ -1,17 +1,22 @@
#!./.mnist-pytorch/bin/python
import os

import fire
import torchvision
import numpy as np
import os
import torchvision


def get_data(out_dir='data'):
# Make dir if necessary
if not os.path.exists(out_dir):
os.mkdir(out_dir)

# Download data
torchvision.datasets.MNIST(root=f'{out_dir}/train', transform=torchvision.transforms.ToTensor, train=True, download=True)
torchvision.datasets.MNIST(root=f'{out_dir}/test', transform=torchvision.transforms.ToTensor, train=False, download=True)
torchvision.datasets.MNIST(
root=f'{out_dir}/train', transform=torchvision.transforms.ToTensor, train=True, download=True)
torchvision.datasets.MNIST(
root=f'{out_dir}/test', transform=torchvision.transforms.ToTensor, train=False, download=True)


if __name__ == '__main__':
fire.Fire(get_data)
fire.Fire(get_data)
21 changes: 13 additions & 8 deletions examples/mnist-pytorch/bin/split_data
@@ -1,9 +1,11 @@
#!./.mnist-pytorch/bin/python
import torchvision
import torch
import os
from math import floor

import fire
import os
import torch
import torchvision


def splitset(dataset, parts):
n = dataset.shape[0]
@@ -20,8 +22,10 @@ def split(out_dir='data', n_splits=2):
os.mkdir(f'{out_dir}/clients')

# Load and convert to dict
train_data = torchvision.datasets.MNIST(root=f'{out_dir}/train', transform=torchvision.transforms.ToTensor, train=True)
test_data = torchvision.datasets.MNIST(root=f'{out_dir}/test', transform=torchvision.transforms.ToTensor, train=False)
train_data = torchvision.datasets.MNIST(
root=f'{out_dir}/train', transform=torchvision.transforms.ToTensor, train=True)
test_data = torchvision.datasets.MNIST(
root=f'{out_dir}/test', transform=torchvision.transforms.ToTensor, train=False)
data = {
'x_train': splitset(train_data.data, n_splits),
'y_train': splitset(train_data.targets, n_splits),
@@ -31,7 +35,7 @@ def split(out_dir='data', n_splits=2):

# Make splits
for i in range(n_splits):
subdir=f'{out_dir}/clients/{str(i+1)}'
subdir = f'{out_dir}/clients/{str(i+1)}'
if not os.path.exists(subdir):
os.mkdir(subdir)
torch.save({
@@ -40,7 +44,8 @@ def split(out_dir='data', n_splits=2):
'x_test': data['x_test'][i],
'y_test': data['y_test'][i],
},
f'{subdir}/mnist.pt')
f'{subdir}/mnist.pt')


if __name__ == '__main__':
fire.Fire(split)
fire.Fire(split)