services:

  llm-gpu:
    image: ollama/ollama:latest
    profiles: ["linux-gpu"]
    networks:
      - net
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

  pull-model:
    image: genai-stack/pull-model:latest
    build:
      context: .
      dockerfile: pull_model.Dockerfile
    environment:
      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL-http://llm-gpu:11434}
      - LLM=${LLM-llama2}
    networks:
      - net
    tty: true

  database:
    user: neo4j:neo4j
    image: neo4j:5.11
    ports:
      - 7687:7687
      - 7474:7474
    volumes:
      - $PWD/data:/data
    environment:
      - NEO4J_AUTH=${NEO4J_USERNAME-neo4j}/${NEO4J_PASSWORD-password}
      - NEO4J_PLUGINS=["apoc"]
      - NEO4J_db_tx__log_rotation_retention__policy=false
      - NEO4J_dbms_security_procedures_unrestricted=apoc.*
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider localhost:7474 || exit 1"]
      interval: 15s
      timeout: 30s
      retries: 10
    networks:
      - net

  loader:
    build:
      context: .
      dockerfile: loader.Dockerfile
    volumes:
      - $PWD/embedding_model:/embedding_model
    environment:
      - NEO4J_URI=${NEO4J_URI-neo4j://database:7687}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD-password}
      - NEO4J_USERNAME=${NEO4J_USERNAME-neo4j}
      - OPENAI_API_KEY=${OPENAI_API_KEY-}
      - GOOGLE_API_KEY=${GOOGLE_API_KEY-}
      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL-http://llm-gpu:11434}
      - EMBEDDING_MODEL=${EMBEDDING_MODEL-sentence_transformer}
      - LANGCHAIN_ENDPOINT=${LANGCHAIN_ENDPOINT-"https://api.smith.langchain.com"}
      - LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2-false}
      - LANGCHAIN_PROJECT=${LANGCHAIN_PROJECT}
      - LANGCHAIN_API_KEY=${LANGCHAIN_API_KEY}
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
      - AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}
    networks:
      - net
    depends_on:
      database:
        condition: service_healthy
      pull-model:
        condition: service_completed_successfully
    x-develop:
      watch:
        - action: rebuild
          path: .
          ignore:
            - bot.py
            - pdf_bot.py
            - api.py
            - front-end/
    ports:
      - 8081:8080
      - 8502:8502

  bot:
    build:
      context: .
      dockerfile: bot.Dockerfile
    volumes:
      - $PWD/embedding_model:/embedding_model
    environment:
      - NEO4J_URI=${NEO4J_URI-neo4j://database:7687}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD-password}
      - NEO4J_USERNAME=${NEO4J_USERNAME-neo4j}
      - OPENAI_API_KEY=${OPENAI_API_KEY-}
      - GOOGLE_API_KEY=${GOOGLE_API_KEY-}
      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL-http://llm-gpu:11434}
      - LLM=${LLM-llama2}
      - EMBEDDING_MODEL=${EMBEDDING_MODEL-sentence_transformer}
      - LANGCHAIN_ENDPOINT=${LANGCHAIN_ENDPOINT-"https://api.smith.langchain.com"}
      - LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2-false}
      - LANGCHAIN_PROJECT=${LANGCHAIN_PROJECT}
      - LANGCHAIN_API_KEY=${LANGCHAIN_API_KEY}
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
      - AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}
    networks:
      - net
    depends_on:
      database:
        condition: service_healthy
      pull-model:
        condition: service_completed_successfully
    x-develop:
      watch:
        - action: rebuild
          path: .
          ignore:
            - loader.py
            - pdf_bot.py
            - api.py
            - front-end/
    ports:
      - 8501:8501

  pdf_bot:
    build:
      context: .
      dockerfile: pdf_bot.Dockerfile
    environment:
      - NEO4J_URI=${NEO4J_URI-neo4j://database:7687}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD-password}
      - NEO4J_USERNAME=${NEO4J_USERNAME-neo4j}
      - OPENAI_API_KEY=${OPENAI_API_KEY-}
      - GOOGLE_API_KEY=${GOOGLE_API_KEY-}
      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL-http://llm-gpu:11434}
      - LLM=${LLM-llama2}
      - EMBEDDING_MODEL=${EMBEDDING_MODEL-sentence_transformer}
      - LANGCHAIN_ENDPOINT=${LANGCHAIN_ENDPOINT-"https://api.smith.langchain.com"}
      - LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2-false}
      - LANGCHAIN_PROJECT=${LANGCHAIN_PROJECT}
      - LANGCHAIN_API_KEY=${LANGCHAIN_API_KEY}
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
      - AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}
    networks:
      - net
    depends_on:
      database:
        condition: service_healthy
      pull-model:
        condition: service_completed_successfully
    x-develop:
      watch:
        - action: rebuild
          path: .
          ignore:
            - loader.py
            - bot.py
            - api.py
            - front-end/
    ports:
      - 8503:8503

  api:
    build:
      context: .
      dockerfile: api.Dockerfile
    volumes:
      - $PWD/embedding_model:/embedding_model
    environment:
      - NEO4J_URI=${NEO4J_URI-neo4j://database:7687}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD-password}
      - NEO4J_USERNAME=${NEO4J_USERNAME-neo4j}
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - GOOGLE_API_KEY=${GOOGLE_API_KEY}
      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL-http://llm-gpu:11434}
      - LLM=${LLM-llama2}
      - EMBEDDING_MODEL=${EMBEDDING_MODEL-sentence_transformer}
      - LANGCHAIN_ENDPOINT=${LANGCHAIN_ENDPOINT-"https://api.smith.langchain.com"}
      - LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2-false}
      - LANGCHAIN_PROJECT=${LANGCHAIN_PROJECT}
      - LANGCHAIN_API_KEY=${LANGCHAIN_API_KEY}
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
      - AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}
    networks:
      - net
    depends_on:
      database:
        condition: service_healthy
      pull-model:
        condition: service_completed_successfully
    x-develop:
      watch:
        - action: rebuild
          path: .
          ignore:
            - loader.py
            - bot.py
            - pdf_bot.py
            - front-end/
    ports:
      - 8504:8504
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 http://localhost:8504/ || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 5

  front-end:
    build:
      context: .
      dockerfile: front-end.Dockerfile
    x-develop:
      watch:
        - action: sync
          path: ./front-end
          target: /app
          ignore:
            - ./front-end/node_modules/
        - action: rebuild
          path: ./front-end/package.json
    depends_on:
      api:
        condition: service_healthy
    networks:
      - net
    ports:
      - 8505:8505

networks:
  net:
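# Note: the llm-gpu service above is gated behind the "linux-gpu" Compose
# profile, so it only starts when that profile is enabled, e.g.
#   docker compose --profile linux-gpu up --build
# Each ${VAR-default} reference falls back to the value after the "-" when the
# variable is unset in the shell or in a .env file.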