Commit 5a40603

Update Readme
davidjurado committed Sep 9, 2021
1 parent 434d56b commit 5a40603
Showing 2 changed files with 5 additions and 5 deletions.
6 changes: 3 additions & 3 deletions image_segmentation/README.md
@@ -7,9 +7,9 @@
virtualenv -p python3 ./env && source ./env/bin/activate

# Install MLCube and MLCube docker runner from GitHub repository (normally, users will just run `pip install mlcube mlcube_docker`)
-git clone https://github.com/sergey-serebryakov/mlbox.git && cd mlbox && git checkout feature/configV2
-cd ./runners/mlcube_docker && export PYTHONPATH=$(pwd)
-cd ../../ && pip install -r mlcube/requirements.txt && pip install omegaconf && cd ../
+git clone https://github.com/mlcommons/mlcube && cd mlcube/mlcube
+python setup.py bdist_wheel && pip install --force-reinstall ./dist/mlcube-* && cd ..
+cd ./runners/mlcube_docker && python setup.py bdist_wheel && pip install --force-reinstall --no-deps ./dist/mlcube_docker-* && cd ../../..

# Fetch the image segmentation workload
git clone https://github.com/mlcommons/training && cd ./training
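
A quick way to sanity-check the new source install before fetching the workload (a minimal sketch; it assumes only pip and the `mlcube` CLI entry point provided by the wheels built above):

# Confirm both wheels are installed and the CLI is on PATH
pip show mlcube mlcube_docker
mlcube --help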
4 changes: 2 additions & 2 deletions image_segmentation/mlcube/mlcube.yaml
@@ -9,7 +9,7 @@ platform:

docker:
# Image name.
-image: mlcommons/train_image_segmentation:0.0.1
+image: mlcommons/image_segmentation:0.0.1
# Docker build context relative to $MLCUBE_ROOT. Default is `build`.
build_context: "../pytorch"
# Docker file name within docker build context, default is `Dockerfile`.
@@ -35,4 +35,4 @@ tasks:
# parameters_file: Yaml file with training parameters.
inputs: {data_dir: processed_data/, parameters_file: {type: file, default: parameters.yaml}}
# Output folder
-outpus: {output_dir: output/}
+outpus: {output_dir: output/}
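
For context, a configuration like the one above is consumed by the MLCube Docker runner installed via the README. A hedged sketch of invoking the training task follows (the `train` task name and the local path are assumptions, not shown in this diff):

# From the training repo root, run the task defined in mlcube.yaml
mlcube run --mlcube=./image_segmentation/mlcube --task=train --platform=docker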
