Move to the 'Deploying-ML-with-FastAPI-on-Docker-Kubernetes' project directory:
cd Deploying-ML-with-FastAPI-on-Docker-Kubernetes
## 🐋 2. Deploy on Docker
Point the Docker CLI at Minikube's Docker daemon so the image you build is available inside the cluster:

eval $(minikube docker-env)

Build the image:

docker image build -t ml-prediction-with-fastapi:1.0 .
Run the container in the background, publishing host port 8002 to container port 8000:

docker run --rm -d \
--name ml-prediction \
-p 8002:8000 \
ml-prediction-with-fastapi:1.0
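To confirm the container is up and the port mapping took effect:

```bash
# Should list ml-prediction with 0.0.0.0:8002->8000/tcp in the PORTS column.
docker container ls --filter name=ml-prediction
```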
Make a test prediction against the running container:

curl -X 'POST' \
'http://127.0.0.1:8002/prediction/iris' \
-H 'accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
"SepalLengthCm": 5.1,
"SepalWidthCm": 3.5,
"PetalLengthCm": 1.4,
"PetalWidthCm": 0.2
}'
Stop the container when you are done; it is removed automatically because of --rm:

docker container stop ml-prediction
More information: https://fastapi.tiangolo.com/deployment/docker/
## ☸️ 3. Deploy on Kubernetes

Create the Deployment from the manifest in the repository:

kubectl apply -f ml-prediction-deployment.yaml
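The walkthrough does not reproduce ml-prediction-deployment.yaml; below is a minimal sketch of what such a Deployment could look like, assuming the image built above and a container listening on port 8000 (the name, labels, and replica count are illustrative):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ml-prediction
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ml-prediction
  template:
    metadata:
      labels:
        app: ml-prediction
    spec:
      containers:
        - name: ml-prediction
          # Image built above inside Minikube via `eval $(minikube docker-env)`.
          image: ml-prediction-with-fastapi:1.0
          # Assumption: the image exists only in Minikube's Docker daemon,
          # so it must never be pulled from a registry.
          imagePullPolicy: Never
          ports:
            - containerPort: 8000
```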
Expose the Deployment with a NodePort service:

kubectl create service nodeport ml-prediction --tcp=8000:8000
Check the service and the node port it was assigned:

kubectl get services
NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
kubernetes      ClusterIP   10.96.0.1      <none>        443/TCP          6h24m
ml-prediction   NodePort    10.99.159.87   <none>        8000:31647/TCP   72s
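NodePorts are assigned at random from the 30000-32767 range, so look yours up instead of hard-coding it; a small sketch for assembling the service URL:

```bash
# Minikube node IP (192.168.49.2 in this walkthrough).
NODE_IP=$(minikube ip)
# NodePort assigned to the ml-prediction service.
NODE_PORT=$(kubectl get service ml-prediction -o jsonpath='{.spec.ports[0].nodePort}')
echo "http://${NODE_IP}:${NODE_PORT}/prediction/iris"
```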
### Make a prediction using the Minikube NodePort service

Use the Minikube node IP and the node port from the output above (here 192.168.49.2 and 31647; yours may differ):

curl -X 'POST' \
'http://192.168.49.2:31647/prediction/iris' \
-H 'accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
"SepalLengthCm": 5.1,
"SepalWidthCm": 3.5,
"PetalLengthCm": 1.4,
"PetalWidthCm": 0.2
}'
### Expose the service with an Ingress

Enable the Minikube ingress addon, which installs the NGINX ingress controller:

minikube addons enable ingress

Create the Ingress from the manifest in the repository:

kubectl apply -f ingress-ml-prediction.yaml
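ingress-ml-prediction.yaml is likewise not reproduced in this walkthrough; a minimal sketch, assuming the host name used below, the nginx ingress class installed by the Minikube addon, and the ml-prediction service created above (the Ingress name is an assumption):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ml-prediction
spec:
  ingressClassName: nginx
  rules:
    - host: ml.prediction.vbo.local
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                # Routes traffic to the NodePort service created above.
                name: ml-prediction
                port:
                  number: 8000
```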
Add a hosts entry so the Ingress host name resolves to the Minikube node IP:

sudo vim /etc/hosts

192.168.49.2    ml.prediction.vbo.local
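Alternatively, append the entry without opening an editor; this uses minikube ip in case your node IP differs from 192.168.49.2:

```bash
# Resolve the ingress host name to the Minikube node IP.
echo "$(minikube ip) ml.prediction.vbo.local" | sudo tee -a /etc/hosts
```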
### Make a prediction through the Ingress

curl -X 'POST' \
'http://ml.prediction.vbo.local/prediction/iris' \
-H 'accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
"SepalLengthCm": 5.1,
"SepalWidthCm": 3.5,
"PetalLengthCm": 1.4,
"PetalWidthCm": 0.2
}'
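If the request fails, check that the Ingress has been admitted and given an address:

```bash
# The ADDRESS column should show the Minikube node IP once the controller has synced.
kubectl get ingress
```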