diff --git a/src/content/Docs/_sequence.ts b/src/content/Docs/_sequence.ts index 3613cf10..0d81cb01 100644 --- a/src/content/Docs/_sequence.ts +++ b/src/content/Docs/_sequence.ts @@ -83,7 +83,22 @@ export const docsSequence = [ { label: "AI Art" }, { label: "Jupyter Notebook" }, { label: "Text-Generation-WebUI" }, - { label: "Mistral 7B"} + { label: "Mistral 7B"}, + { label: "Skypilot"}, + { label: "H2O"}, + { label: "H2O Flow"}, + { label: "H2O LLM Studio"}, + { label: "h2oGPT"}, + { label: "FlowiseAI"}, + { label: "Machine Learning Environments"}, + { label: "Deeplearning4j"}, + { label: "Keras"}, + { label: "Haystack"}, + { label: "AnythingLLM"}, + { label: "LocalAI"}, + { label: "LangFlow"}, + { label: "TensorFlow"}, + { label: "PyTorch"}, ], }, { @@ -91,19 +106,33 @@ export const docsSequence = [ subItems: [ { label: "TLS Termination of Akash Deployments" }, { label: "Multi-Tiered Deployment" }, - { label: "Ruby on Rails with Sia and Auth0" }, { label: "Jackal S3 Gateway" }, + { label: "Caddy"}, + { label: "Nginx"}, + { label: "Apache HTTP Server"}, + { label: "LocalStack"}, ], }, { label: "Tooling", subItems: [ - { label: "Provider Utilization Dashboard" }, - { label: "Akash Playground" }, + { label: "Provider Utilization Dashboard"}, { label: "Radicle" }, - { label: "PostgreSQL restore/backup" }, - { label: "Provider Utilization Dashboard" }, { label: "Akash Playground" }, + { label: "Kong"}, + { label: "Jenkins"}, + { label: "Traefik"}, + { label: "Mautic"}, + { label: "RabbitMQ"}, + { label: "JetBrains Hub"}, + { label: "EMQX"}, + { label: "Apache Flink"}, + { label: "Lightstreamer"}, + { label: "RabbitMQ"}, + { label: "NATS"}, + { label: "Apache Airflow"}, + { label: "Apache Kafka"}, + { label: "Apache Storm"}, ], }, { @@ -123,7 +152,7 @@ export const docsSequence = [ { label: "Waku" }, { label: "qBittorrent" }, { label: "Discourse" }, - { label: "Invidious" }, + { label: "Invidious" }, ], }, { @@ -132,26 +161,54 @@ export const docsSequence = [ { label: 
"JSON Server" }, { label: "Dolt" }, { label: "CouchDB" }, - { label: "PostgreSQL restore/backup" }, + { label: "MariaDB"}, + { label: "PostgreSQL" }, + { label: "ArangoDB"}, + { label: "OrientDB"}, + { label: "MongoDB"}, + { label: "Neo4j"}, ], }, { - label: "Data Visualizations", + label: "Hosting", + subItems:[ + { label: "Caddy"}, + { label: "WordPress"}, + { label: "Discourse"}, + { label: "Ghost"}, + ] + }, + { + label: "Data Analysis", subItems: [ { label: "Redash" }, { label: "Dash" }, { label: "Metabase" }, { label: "KNIME"}, + { label: "Dataiku"}, + { label: "Apache Superset"}, + { label: "Rstudio"}, + { label: "Apache Druid"}, + { label: "Grafana"}, + { label: "OpenSearch"}, + { label: "Matomo"}, + { label: "JupyterHub"}, + { label: "JATOS"}, ], }, { label: "Frameworks", subItems: [ + { label: "Django" }, + { label: "Flask"}, { label: "Next.js" }, { label: "React" }, { label: "Astro" }, { label: "Angular" }, - { label: "Django" }, + { label: "Nue JS"}, + { label: "Gatsby"}, + { label: "NextJS"}, + { label: "Vue.js"}, ], }, { @@ -159,14 +216,19 @@ export const docsSequence = [ subItems: [{ label: "Fast.com" }], }, { - label: "Apps", + label: "Games", subItems: [ - { label: "Waku" }, - { label: "qBittorrent" }, - { label: "Discourse" }, - { label: "Invidious" }, - ], + { label: "Minecraft"}, + ] }, + { + label: "Science", + subItems:[ + { label: "GeoNetwork"}, + { label: "GNU Octave"}, + { label: "MATLAB"} + ] + } ], }, { diff --git a/src/content/Docs/assets/h20llm.png b/src/content/Docs/assets/h20llm.png new file mode 100644 index 00000000..7341ab4a Binary files /dev/null and b/src/content/Docs/assets/h20llm.png differ diff --git a/src/content/Docs/guides/apps/photoprism/index.md b/src/content/Docs/guides/apps/photoprism/index.md new file mode 100644 index 00000000..139d4d33 --- /dev/null +++ b/src/content/Docs/guides/apps/photoprism/index.md @@ -0,0 +1,153 @@ +--- +categories: ["Guides"] +tags: ["Apps", "Photo Management",] +weight: 1 +title: "Deploying 
PhotoPrism on Akash" +linkTitle: "PhotoPrism" +--- + + +### **What is PhotoPrism?** +PhotoPrism is a self-hosted AI-powered photo management solution. It allows users to organize, browse, and share their photo collections using modern technology like TensorFlow for image classification and facial recognition. Features include: + +- **Automatic Organization:** Tagging, categorization, and duplicate detection. +- **Privacy-Oriented:** Self-hosted to ensure your photos remain private. +- **Powerful Search:** Use keywords, locations, or metadata to find images quickly. +- **AI Capabilities:** Facial recognition, object detection, and more. + +It is a popular choice for individuals who want to manage their photo libraries without relying on cloud platforms like Google Photos. + +--- + +## **Steps to Deploy PhotoPrism on Akash** + +--- + +### **Prerequisites** +1. **Akash CLI Installed**: Ensure you have the Akash command-line interface installed and configured. You should have an account with sufficient $AKT tokens. +2. **SDL Template**: The deployment requires an SDL (Service Definition Language) file for specifying deployment details. +3. **Akash Wallet**: Your wallet should be funded with $AKT to pay for the deployment. + +--- + +### **1. Create a Storage Volume** +PhotoPrism requires persistent storage to save photos and metadata. Create a storage volume on Akash using Akash's persistent storage feature. + +``` +--- +version: "2.0" + +services: + photoprism: + image: photoprism/photoprism + env: + PHOTOPRISM_ADMIN_PASSWORD: "yourpassword" + PHOTOPRISM_DEBUG: "true" + expose: + - port: 2342 + as: 80 + to: + - global + resources: + cpu: + units: 1000m + memory: + size: 512Mi + storage: + size: 10Gi + profiles: + compute: + - "aksh-default" + placement: + attributes: + region: us-west + signedBy: + anyOf: + - "akash1..." 
+profiles: + compute: + aksh-default: + resources: + cpu: + units: 1000m + memory: + size: 512Mi + storage: + size: 10Gi +deployment: + photoprism: + profile: aksh-default + count: 1 +``` + +--- + +### **2. Update the SDL File** + +1. Replace `PHOTOPRISM_ADMIN_PASSWORD` with a secure admin password. +2. Adjust the `storage` size according to your photo library requirements. +3. Update the `region` under the `placement` section if you prefer a specific geographic location for your deployment. +4. Save the file as `deploy.yaml`. +--- + +### **3. Deploy PhotoPrism on Akash** + +1. **Validate the SDL File:** + + Run the following command to validate your SDL file: + ``` + akash tx deployment create --from --node + ``` + +2. **Submit the Deployment:** + + After validation, submit your deployment to the Akash network: + ``` + akash tx deployment create deploy.yaml + ``` + +3. **Query Deployment Status:** + + Check the status of your deployment: + ``` + akash query deployment list --owner + ``` + +--- + +### **4. Access the PhotoPrism Web Interface** + +1. After the deployment is successful, note the external endpoint provided by Akash. +2. Open a browser and navigate to `http://` to access the PhotoPrism UI. +3. Log in using the admin credentials set in the SDL file. + +--- + +### **5. Upload and Organize Photos** + +- Upload your photos to PhotoPrism via the web interface. +- Let the AI-powered system analyze and organize your photo library. + +--- + +## **Customizing PhotoPrism** + +- **Environment Variables:** Adjust configurations like storage paths, database options, or feature toggles by modifying the environment variables in the SDL file. +- **Resource Scaling:** Increase or decrease CPU, memory, or storage resources in the SDL file based on your needs. + +--- + +### **Costs** +The cost of deploying PhotoPrism on Akash will depend on: +- CPU and memory resources allocated. +- Storage volume size. +- Rental prices on Akash's marketplace. 
+ +To estimate costs, query the Akash marketplace for current bids: +``` +akash query market bid list +``` + +--- + +By deploying PhotoPrism on Akash, you get a secure, private, and scalable solution to manage your photo library efficiently without relying on centralized cloud services. \ No newline at end of file diff --git a/src/content/Docs/guides/blockchain/solar/index.md b/src/content/Docs/guides/blockchain/solar/index.md new file mode 100644 index 00000000..852aafd4 --- /dev/null +++ b/src/content/Docs/guides/blockchain/solar/index.md @@ -0,0 +1,126 @@ +--- +categories: ["Guides"] +tags: ["Blockchain", "Explorer"] +weight: 1 +title: "Guide to Deploy Solar on Akash" +linkTitle: "Solar" +--- + +**Solar** is a blockchain explorer and dashboard built for monitoring, querying, and analyzing data from blockchain networks. Its primary purpose is to provide a user-friendly interface for developers, validators, and other stakeholders to interact with and extract insights from blockchain data. The **Solar** platform can be deployed on decentralized infrastructure like **Akash**, ensuring cost-effective, scalable, and censorship-resistant hosting. + +This guide outlines how to deploy **Solar** on Akash using the `upstage/solar` Docker image and a sample SDL (Service Definition Language) template. + +--- + +## **Prerequisites** + +1. **Akash CLI**: Install the Akash CLI on your system by following the [official guide](https://docs.akash.network/guides/cli). +2. **Akash Wallet**: Set up an Akash wallet with sufficient funds to pay for your deployment. +3. **Docker Knowledge**: Basic familiarity with Docker images. +4. **Solar Docker Image**: The prebuilt Docker image `upstage/solar`. + +--- + +## **Steps to Deploy Solar on Akash** + +### 1. Create a Deployment Folder +Start by creating a folder for your deployment files: +``` +mkdir akash-solar && cd akash-solar +``` + +### 2. 
Write the SDL File +The SDL file defines the specifications of your deployment, such as the container image, ports, and resources. Use the following example for deploying **Solar**: + +#### `deploy.yaml` +``` +version: "2.0" + +services: + solar: + image: upstage/solar:latest + env: + - NODE_ENV=production + expose: + - port: 80 + as: 80 + to: + - global + +profiles: + compute: + solar: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + placement: + akash: + attributes: + region: us-west + signedBy: + anyOf: + - akash.network + pricing: + solar: + denom: uakt + amount: 100 + +deployment: + solar: + akash: + profile: solar + count: 1 +``` + +### 3. Validate the SDL File +Run the following command to ensure your SDL file is correctly formatted: +``` +akash validate deploy.yaml +``` + +### 4. Deploy the SDL to Akash +#### a. Create a Deployment +Submit the SDL file to Akash: +``` +akash tx deployment create deploy.yaml --from --node --chain-id +``` + +#### b. Query Deployment Status +Monitor the status of your deployment: +``` +akash query deployment list --owner +``` + +#### c. Fund the Lease +Once the deployment is accepted, fund the lease with tokens: +``` +akash tx deployment deposit uakt --from +``` + +### 5. Access the Solar Dashboard +Once the deployment is live, Akash will provide an external IP address or hostname. Access the **Solar** dashboard in your browser at: +``` +http:// +``` + +--- + +## **Overview of the SDL File** +- **Services Section**: Defines the `solar` service using the Docker image `upstage/solar:latest` and exposes port `80` to the global network. +- **Profiles Section**: Configures compute resources, including CPU, memory, and storage. It specifies a pricing model for deployment in Akash tokens (`uakt`). +- **Deployment Section**: Links the service to the compute profile and sets the number of instances to `1`. + +--- + +## **Customizing the Deployment** +1. 
**Environment Variables**: Update `NODE_ENV` or add additional environment variables in the `env` section of the SDL file. +2. **Ports**: Modify the `port` and `as` fields under `expose` to match your desired setup. +3. **Resources**: Adjust `cpu`, `memory`, and `storage` based on your performance requirements. + +--- + +By following this guide, you can successfully deploy and host **Solar** on Akash, leveraging its decentralized infrastructure to power your blockchain explorer. \ No newline at end of file diff --git a/src/content/Docs/guides/data-viz/dash/index.md b/src/content/Docs/guides/data-analysis/dash/index.md similarity index 100% rename from src/content/Docs/guides/data-viz/dash/index.md rename to src/content/Docs/guides/data-analysis/dash/index.md diff --git a/src/content/Docs/guides/data-viz/dataiku/index.md b/src/content/Docs/guides/data-analysis/dataiku/index.md similarity index 100% rename from src/content/Docs/guides/data-viz/dataiku/index.md rename to src/content/Docs/guides/data-analysis/dataiku/index.md diff --git a/src/content/Docs/guides/data-analysis/druid/index.md b/src/content/Docs/guides/data-analysis/druid/index.md new file mode 100644 index 00000000..a11989fe --- /dev/null +++ b/src/content/Docs/guides/data-analysis/druid/index.md @@ -0,0 +1,160 @@ +--- +categories: ["Guides"] +tags: ["Data Analytics"] +weight: 1 +title: "Guide to Deploy Apache Druid on Akash Network" +linkTitle: "Apache Druid" +--- + +Apache Druid is a real-time analytics database designed for fast aggregation and exploration of large datasets. It is particularly suited for time-series data, enabling low-latency queries on high-ingest rates. Druid is widely used for applications like operational analytics, business intelligence dashboards, and interactive data exploration. + +**Key Features:** +- **Real-time ingestion**: Allows for streaming and batch data ingestion. +- **Columnar storage**: Optimized for analytical queries, offering high-speed data retrieval. 
+- **Scalability**: Built for horizontal scaling to handle petabyte-scale data. +- **High availability**: Provides redundancy and fault-tolerance with replication. +- **Flexible query models**: Supports SQL-like queries and Druid native queries. + +--- + +## Why Deploy Apache Druid on Akash? + +Akash Network is a decentralized cloud marketplace that allows users to deploy workloads at a fraction of the cost of traditional cloud providers. By deploying Apache Druid on Akash, you benefit from: +1. **Cost efficiency**: Lower operational costs for hosting large-scale infrastructure. +2. **Decentralization**: Increased control and reduced dependency on centralized cloud providers. +3. **Scalability**: Easily scale your cluster up or down based on requirements. +4. **Open-source synergy**: Both Druid and Akash are open-source, promoting flexibility and innovation. + +--- + +## Step-by-Step Guide to Deploy Apache Druid on Akash + +### 1. Prerequisites + +1. **Akash CLI and Wallet Setup:** + - Install the Akash CLI by following the [official documentation](docs/deployments/akash-cli/overview/). + - Fund your Akash wallet with sufficient AKT tokens. + +2. **Druid Docker Image:** + - Druid is available as a container image. You can use the official image from DockerHub: `apache/druid`. + +3. **Akash SDL Template:** + - Prepare an SDL (Stack Definition Language) file to define your deployment specifications. + +--- + +### 2. 
Prepare Your Deployment Files + +#### Sample SDL File for Druid Deployment + +Below is an example SDL file to deploy a basic Druid cluster with a single node: + +```yaml +version: "2.0" + +services: + druid: + image: apache/druid:latest + env: + - DRUID_NODE_TYPE=coordinator-overlord + - JAVA_OPTS=-Xmx4g -Xms4g + expose: + - port: 8081 + as: 80 + to: + - global: true + +profiles: + compute: + druid: + resources: + cpu: + units: 1 + memory: + size: 4Gi + storage: + size: 10Gi + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - akash1... + +deployment: + druid: + westcoast: + profile: + compute: druid + placement: westcoast + count: 1 +``` + +--- + +### 3. Configure and Deploy + +1. **Customize the SDL File:** + - Adjust resource requirements (CPU, memory, and storage) based on your workload. + - Specify the region or provider attributes. + +2. **Validate the SDL File:** + ```bash + akash tx deployment create deploy.yaml --from + ``` + +3. **Send Your Deployment to Akash:** + After successful validation, use the following commands to interact with the Akash marketplace: + ```bash + akash tx deployment create deploy.yaml --from + ``` + +4. **Approve Lease:** + Once bids are received, select the appropriate provider and approve the lease: + ```bash + akash tx market lease create --dseq --from + ``` + +--- + +### 4. Verify and Monitor + +1. **Access Druid UI:** + - Open your browser and navigate to the provider’s IP address or domain with port `8081`. + +2. **Monitor Logs:** + - Use the Akash CLI to check logs: + ```bash + akash provider lease logs --dseq --from + ``` + +--- + +### 5. Scale and Manage + +- To scale your deployment, update the `count` field in the SDL file for the `deployment` section. +- Redeploy the updated SDL file with: + ```bash + akash tx deployment update deploy.yaml --from + ``` + +--- + +## Best Practices for Deploying Apache Druid on Akash + +1. 
**Use Persistent Storage:** + - Configure volume mounts for data durability across container restarts. + +2. **Clustered Deployment:** + - For production workloads, deploy Druid in a clustered setup with multiple node types (e.g., broker, historical, and middle manager). + +3. **Secure Your Deployment:** + - Set up firewalls and secure ingress rules to restrict access to your Druid instance. + +4. **Monitor Costs:** + - Regularly review your usage to optimize resources and minimize costs. + +--- + +Deploying Apache Druid on Akash provides a scalable and cost-efficient solution for analytics workloads. Customize the deployment as per your requirements and leverage the decentralized power of Akash to reduce dependency on traditional cloud providers. \ No newline at end of file diff --git a/src/content/Docs/guides/data-analysis/elasticsearch/index.md b/src/content/Docs/guides/data-analysis/elasticsearch/index.md new file mode 100644 index 00000000..fb573df2 --- /dev/null +++ b/src/content/Docs/guides/data-analysis/elasticsearch/index.md @@ -0,0 +1,81 @@ +--- +categories: ["Guides"] +tags: ["Data Analysis"] +weight: 1 +title: "Elasticsearch" +linkTitle: "Elasticsearch" +--- + +To deploy Elasticsearch on the Akash Network, you’ll typically go through these steps: + +## 1. Prepare the Akash SDL File + +An Akash SDL file (in YAML) defines the resources and configuration needed for the deployment. Below is a sample configuration to deploy Elasticsearch. 
+ +``` +--- +version: "2.0" + +services: + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.15.5 # Replace with the desired version + expose: + - port: 9200 + to: + - global: true + env: + - discovery.type=single-node # Configure for a single-node deployment + - ES_JAVA_OPTS=-Xms512m -Xmx512m # Adjust memory limits as needed + +profiles: + compute: + elasticsearch: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + size: 5Gi # Adjust as needed + + placement: + westcoast: + attributes: + region: us-west + pricing: + elasticsearch: + denom: uakt + amount: 500 + +deployment: + elasticsearch: + westcoast: + profile: elasticsearch + count: 1 +``` + +2. Deploy Using Akash CLI + +Make sure you have the Akash CLI installed and configured. + + 1. **Initialize the deployment**: + + + ``` + akash tx deployment create .yml --from --chain-id --node + ``` + + 2. **Wait for bid completion and accept the lease**: You’ll need to monitor for a bid and accept the lease using Akash CLI commands. + + 3. **Access Elasticsearch**: Once deployed, you should be able to access your Elasticsearch instance via the external IP and the specified port (9200 in this case). + +3. Testing Elasticsearch + +After deployment, verify that Elasticsearch is accessible by sending an HTTP request to the endpoint: + +``` + +curl -X GET "http://:9200/" +``` + +This SDL file provides a basic single-node Elasticsearch setup. For production, you might want a multi-node setup, secure configurations, and perhaps additional monitoring services. 
\ No newline at end of file diff --git a/src/content/Docs/guides/data-analysis/grafana/index.md b/src/content/Docs/guides/data-analysis/grafana/index.md new file mode 100644 index 00000000..3d66e140 --- /dev/null +++ b/src/content/Docs/guides/data-analysis/grafana/index.md @@ -0,0 +1,160 @@ +--- +categories: ["Guides"] +tags: ["Data Visualization"] +weight: 1 +title: "Grafana" +linkTitle: "Grafana" +--- + +Here’s a step-by-step guide for deploying Grafana on Akash using the **grafana/grafana** Docker image: + +--- + +### **Step 1: Install Akash CLI** +Ensure you have the Akash CLI installed and configured. Follow [Akash CLI installation documentation](https://docs.akash.network/cli/install) to set up the CLI and connect to your wallet. + +--- + +### **Step 2: Prepare Your SDL File** +Create a file named `deploy.yaml` to define your deployment configuration. + +``` +--- +version: "2.0" + +services: + grafana: + image: grafana/grafana:latest + env: + - GF_SECURITY_ADMIN_USER=admin # Grafana Admin Username + - GF_SECURITY_ADMIN_PASSWORD=admin # Grafana Admin Password + expose: + - port: 3000 + as: 80 + to: + - global + +profiles: + compute: + grafana: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + akash: + attributes: + region: us-west # Choose an Akash region + pricing: + grafana: + denom: uakt + amount: 1000 # Price in Akash tokens + +deployment: + grafana: + grafana: + profile: grafana + count: 1 +``` + +### **Explanation of Key Parameters:** +- **Image**: `grafana/grafana:latest` is the official Grafana Docker image. +- **Environment Variables**: `GF_SECURITY_ADMIN_USER` and `GF_SECURITY_ADMIN_PASSWORD` are used to set up the Grafana admin credentials. +- **Expose**: Expose port 3000 (Grafana’s default) to port 80 for external access. +- **Resources**: Allocate 0.5 CPUs, 512Mi memory, and 1Gi storage. +- **Pricing**: Adjust pricing based on your budget and network bids. 
+ +--- + +### **Step 3: Validate the SDL File** +Run the following command to ensure your SDL file is valid: + +``` +akash validate deploy.yaml +``` + +--- + +### **Step 4: Create a Deployment** +Submit the deployment to the Akash network: + +``` +akash tx deployment create deploy.yaml --from --node +``` + +### **Step 5: Bid Selection** +Once your deployment is live, select a bid: + +``` +akash query market lease list --owner --node +``` + +Identify a suitable bid and accept it: + +``` +akash tx market lease create --owner --dseq --oseq --gseq --provider --from +``` + +--- + +### **Step 6: Access Your Deployment** +1. Get the access details from your provider: + + ``` + akash provider lease-status --dseq --gseq --oseq --provider --from + ``` + +2. Note the `Service URI` for the Grafana service (e.g., `http://:`). + +3. Open the URL in your browser and log in using the admin credentials you specified in the SDL file (`admin/admin` in this example). + +--- + +### **Step 7: Secure Your Grafana Instance** +1. Update your admin password for security. +2. Configure HTTPS using a reverse proxy or Akash’s ingress configurations if required. + +--- + +### **Step 8: (Optional) Add Persistent Storage** +To make your Grafana setup persistent: +1. Use decentralized storage solutions like Filecoin or Storj. +2. Update your SDL file to include persistent volume mounts (e.g., Akash's [PVC](https://docs.akash.network/guides/deployments/storage)). 
+ +--- + +### Example Persistent Storage SDL Snippet: +``` +profiles: + compute: + grafana: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 5Gi # Adjust based on your Grafana storage needs + persistent_storage: + - mount: /var/lib/grafana + class: default +``` + +--- + +### **Step 9: Monitor and Scale Your Deployment** +Monitor Grafana logs and resource usage via Akash’s CLI: + +``` +akash provider lease-logs --dseq --gseq --oseq --provider --from +``` + +Scale your deployment if needed by modifying the `count` parameter in the SDL file or increasing resources (CPU/memory). + +--- + +By following this guide, you’ll have a fully operational Grafana instance running on Akash’s decentralized cloud! Let me know if you need further assistance. diff --git a/src/content/Docs/guides/data-analysis/jatos/index.md b/src/content/Docs/guides/data-analysis/jatos/index.md new file mode 100644 index 00000000..2cb6c608 --- /dev/null +++ b/src/content/Docs/guides/data-analysis/jatos/index.md @@ -0,0 +1,156 @@ +--- +categories: ["Guides"] +tags: ["Data Analysis"] +weight: 1 +title: "JATOS" +linkTitle: "JATOS" +--- + + +Here’s a guide to deploying JATOS on Akash using the `jatos/jatos` Docker image. + +--- + +## **What is JATOS?** +JATOS (Just Another Tool for Online Studies) is an open-source software designed to run online studies, often used in psychology, social sciences, and behavioral research. + +--- + +## **Prerequisites** +1. **Akash Account**: Ensure you have an Akash account and wallet set up. +2. **AKT Tokens**: Sufficient tokens for deployment. +3. **Akash CLI**: Installed and configured. +4. **Akash SDL Template**: A customizable SDL file for deploying the `jatos/jatos` image. +5. **Docker Image**: `jatos/jatos` is the official Docker image for JATOS. +6. **Domain/Static IP**: Optional, but helpful if you want to expose the JATOS instance publicly. + +--- + +## **Steps to Deploy JATOS on Akash** + +### 1. 
**Prepare Your Akash SDL File** +Create an SDL file (`deploy.yaml`) that defines your JATOS deployment. Below is an example configuration: + +``` +--- +version: "2.0" + +services: + jatos: + image: jatos/jatos + env: + - JATOS_IP=0.0.0.0 + - JATOS_PORT=80 + expose: + - port: 80 + as: 80 + to: + - global + accept: [ "http" ] + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + +profiles: + compute: + jatos: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - akash1abcdefghijklmnopqrstu # Replace with the provider's public key + pricing: + jatos: + denom: uakt + amount: 100 + +deployment: + jatos: + westcoast: + profile: jatos + count: 1 +``` + +--- + +### 2. **Configure JATOS** +Update the following variables in the SDL file if needed: + +- **Environment Variables**: + - `JATOS_IP`: Set to `0.0.0.0` to listen on all interfaces. + - `JATOS_PORT`: Set the port JATOS will run on. + - Add more environment variables if needed (e.g., `DB_HOST`, `DB_PORT` for an external database). + +--- + +### 3. **Deploy to Akash** +1. **Validate SDL File**: Use the Akash CLI to validate your SDL file. + ``` + akash tx deployment create deploy.yaml --from + ``` +2. **Query Deployment**: Check the status of your deployment. + ``` + akash query market lease list --owner + ``` +3. **Accept a Bid**: Once a provider submits a bid, accept it. + +--- + +### 4. **Access JATOS** +- Once deployed, JATOS will be accessible via the endpoint provided by the Akash provider. +- You can access the application by navigating to `http://:` in your browser. + +--- + +### 5. **Persisting Data** +To ensure persistent data storage: +- Use Akash’s persistent storage options (modify the SDL file under `storage`). +- Alternatively, connect JATOS to an external database like MySQL or PostgreSQL. Update the environment variables in the SDL file to point to the database. 
+ +--- + +## **Post-Deployment Tasks** +1. **Secure JATOS**: + - Use HTTPS for secure communication. Consider using a reverse proxy (e.g., Traefik or NGINX) with SSL certificates. +2. **Monitor Resource Usage**: + - Adjust the CPU and memory limits in the SDL file as per your workload. +3. **Scaling**: + - If needed, you can scale JATOS instances by increasing the `count` value in the deployment section of the SDL file. + +--- + +## **Troubleshooting** +1. **Deployment Fails**: + - Ensure the Akash provider supports the required resources. + - Check for errors in the deployment logs. + +2. **JATOS Not Accessible**: + - Verify that the `expose` section in the SDL file is correctly configured. + - Ensure the provider has opened the necessary ports. + +3. **Database Issues**: + - Confirm that the database is accessible from the Akash deployment. + - Double-check the database credentials in the environment variables. + +--- + +## **Useful Links** +- [Akash CLI Documentation](https://docs.akash.network/) +- [JATOS Official Docker Image](https://hub.docker.com/r/jatos/jatos) +- [JATOS Documentation](https://www.jatos.org/) + +By following this guide, you should be able to deploy JATOS on Akash and run your online studies effectively. \ No newline at end of file diff --git a/src/content/Docs/guides/data-analysis/jupyterhub/index.md b/src/content/Docs/guides/data-analysis/jupyterhub/index.md new file mode 100644 index 00000000..154f3170 --- /dev/null +++ b/src/content/Docs/guides/data-analysis/jupyterhub/index.md @@ -0,0 +1,132 @@ +--- +categories: ["Guides"] +tags: ["Data Analysis"] +weight: 1 +title: "Guide to Deploying JupyterHub on Akash" +linkTitle: "JupyterHub" +--- + + + +## **What is JupyterHub?** + +JupyterHub is a multi-user server for Jupyter notebooks, allowing multiple users to work on their own Jupyter notebooks simultaneously. 
It provides centralized management and customization for educational, research, or corporate environments, where teams or students need a collaborative platform for data science, machine learning, or development. + +With JupyterHub, users can: +- Access notebooks from a web browser. +- Run code in languages like Python, R, and Julia. +- Share resources in a scalable way. + +This guide shows how to deploy **JupyterHub** using the `jupyterhub/jupyterhub` Docker image on **Akash**. + +--- + +## **Prerequisites** + +1. **Akash CLI or Deployment Tool**: Install and set up Akash CLI on your machine. +2. **Akash Account**: Ensure you have AKT tokens and an active account. +3. **Docker Image**: Use the official Docker image `jupyterhub/jupyterhub`. +4. **Domain and SSL (Optional)**: If deploying for public use, you might want a domain with SSL. + +--- + +## **Deployment Steps** + +### 1. **Prepare the SDL File** +The **SDL (Service Definition Language)** file defines how the application will run on Akash. Below is an example SDL file for deploying JupyterHub. + +``` +version: "2.0" + +services: + jupyterhub: + image: jupyterhub/jupyterhub:latest + env: + - JUPYTERHUB_CRYPT_KEY=some-secret-key # Replace with a secure key + - JUPYTERHUB_ADMIN_USER=admin # Replace with admin username + expose: + - port: 8000 # JupyterHub default port + as: 80 # Expose on port 80 + to: + - global + +profiles: + compute: + jupyterhub: + resources: + cpu: + units: 1 # Adjust CPU units + memory: + size: 2Gi # Set memory size + storage: + size: 10Gi # Allocate storage + + placement: + jupyterhub: + attributes: + region: us-west # Adjust region as needed + signedBy: + anyOf: + - akash + +deployment: + jupyterhub: + jupyterhub: + profile: jupyterhub + count: 1 # Number of replicas +``` + +### 2. **Customize the Configuration** +- **JUPYTERHUB_CRYPT_KEY**: Replace with a secure, randomly generated key for encrypting user cookies. +- **JUPYTERHUB_ADMIN_USER**: Set this to your desired admin username. 
+- **Resources**: Adjust CPU, memory, and storage requirements to match your workload. + +### 3. **Deploy to Akash** +Use the Akash CLI to deploy the SDL file. + +``` +akash tx deployment create deploy.yaml --from +``` + +### 4. **Accept a Bid** +Once a provider bids on your deployment, accept the bid to launch the service: + +``` +akash tx market lease create --dseq --from +``` + +### 5. **Access JupyterHub** +After deployment, obtain the service endpoint by running: + +``` +akash provider lease-status --dseq --from +``` + +The endpoint will look something like `http://:`. Use this in your browser to access JupyterHub. + +--- + +## **Post-Deployment Configuration** + +1. **Add Users**: + - Use the admin panel or update the `jupyterhub_config.py` file to manage users. + - Example: + ```python + c.Authenticator.allowed_users = {'user1', 'user2'} + c.Authenticator.admin_users = {'admin'} + ``` + +2. **Persistent Storage (Optional)**: + - Use a persistent storage solution like decentralized storage (e.g., Filecoin, IPFS) or attach volumes. + +3. **SSL Configuration**: + - If running in production, use a reverse proxy like NGINX or Traefik with Let's Encrypt for HTTPS. + +4. **Scaling**: + - To scale the deployment, increase the `count` parameter in the SDL file and redeploy. + +--- + +## **Conclusion** + +Deploying JupyterHub on Akash offers an affordable and decentralized way to host multi-user Jupyter notebooks. This setup is especially useful for education, research, and collaborative projects. By leveraging Akash's decentralized cloud infrastructure, you can reduce hosting costs and maintain flexibility. 
\ No newline at end of file diff --git a/src/content/Docs/guides/data-analysis/kibana/index.md b/src/content/Docs/guides/data-analysis/kibana/index.md new file mode 100644 index 00000000..197fc0dd --- /dev/null +++ b/src/content/Docs/guides/data-analysis/kibana/index.md @@ -0,0 +1,76 @@ +--- +categories: ["Guides"] +tags: ["Data Analysis"] +weight: 1 +title: "Kibana" +linkTitle: "Kibana" +--- + +Kibana requires Elasticsearch as its data source, so ideally, both should be deployed to Akash if they aren’t accessible elsewhere. + +## 1. Define the Akash Deployment File + +``` +--- +version: "2.0" + +services: + kibana: + image: docker.elastic.co/kibana/kibana:8.10.2 # Specify the desired version of Kibana + env: + - ELASTICSEARCH_HOSTS=http://your_elasticsearch_url:9200 # Replace with actual Elasticsearch endpoint + expose: + - port: 5601 # Kibana default port + as: 80 + to: + - global: true + +profiles: + compute: + kibana: + resources: + cpu: + units: 0.5 # Adjust according to expected workload + memory: + size: 2Gi + storage: + size: 1Gi + placement: + akash: + pricing: + kibana: + denom: uakt + amount: 100000 # Adjust based on the cost you want to set + +deployment: + kibana: + akash: + profile: kibana + count: 1 +``` + +save the file as `deploy.yaml`. + +## 2. Modify the Environment Variables (Optional) + +Adjust the environment variables to configure Kibana according to your requirements. The `ELASTICSEARCH_HOSTS` variable should point to your Elasticsearch instance. + +## 3. Deploying to Akash + +1. **Install the Akash CLI if you haven’t already**: + +``` +curl https://raw.githubusercontent.com/ovrclk/akash/master/godownloader.sh | sh +``` + +2. **Authenticate with Akash using your wallet**: +``` +akash tx authz grant --from --keyring-backend +``` +3. **Submit the Deployment**: +``` +akash tx deployment create --owner --from --dseq --keyring-backend --node https://rpc-akash..org --chain-id akashnet-2 --file kibana_deployment.yaml +``` +4. 
**Approve the Bid and Lease**: Use Akash CLI or Akashlytics to view and accept bids for your deployment. Once you find a provider, you can finalize the lease. + +5. **Access Kibana**: Once deployed, Kibana will be accessible at the provider's public IP. \ No newline at end of file diff --git a/src/content/Docs/guides/data-viz/knime/index.md b/src/content/Docs/guides/data-analysis/knime/index.md similarity index 100% rename from src/content/Docs/guides/data-viz/knime/index.md rename to src/content/Docs/guides/data-analysis/knime/index.md diff --git a/src/content/Docs/guides/data-analysis/matomo/index.md b/src/content/Docs/guides/data-analysis/matomo/index.md new file mode 100644 index 00000000..0d663b06 --- /dev/null +++ b/src/content/Docs/guides/data-analysis/matomo/index.md @@ -0,0 +1,165 @@ +--- +categories: ["Guides"] +tags: ["Data Visualization"] +weight: 1 +title: "Matomo" +linkTitle: "Matomo" +--- + + +Below is a guide on how to deploy **Matomo**, a powerful web analytics platform, on Akash Network. + + +## **Step 1: Install Prerequisites** +Before deploying, ensure you have: +1. **Akash CLI**: Installed and configured. +2. **Account Balance**: Sufficient AKT tokens in your wallet. +3. **Docker**: Installed locally for testing your Matomo deployment. +4. **Domain or Static IP**: For accessing Matomo after deployment. + +--- + +## **Step 2: Prepare the SDL File** +Here’s a sample SDL file for deploying Matomo. It uses an Nginx web server and a MySQL-compatible database (MariaDB). 
+ +### **deploy.yaml** +``` +--- +version: "2.0" + +services: + matomo: + image: matomo:latest + env: + - MATOMO_DATABASE_HOST=mariadb + - MATOMO_DATABASE_USERNAME=matomo + - MATOMO_DATABASE_PASSWORD=yourpassword + - MATOMO_DATABASE_DBNAME=matomo + expose: + - port: 80 + as: 80 + to: + - global: true + depends_on: + - mariadb + + mariadb: + image: mariadb:10.5 + env: + - MYSQL_ROOT_PASSWORD=rootpassword + - MYSQL_DATABASE=matomo + - MYSQL_USER=matomo + - MYSQL_PASSWORD=yourpassword + expose: + - port: 3306 + as: 3306 + to: + - service: matomo + +profiles: + compute: + matomo-profile: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 2Gi + mariadb-profile: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + size: 5Gi + + placement: + default: + attributes: + region: us-west + signedBy: + anyOf: + - akash1provideraddress... + pricing: + matomo-profile: + denom: uakt + amount: 100 + mariadb-profile: + denom: uakt + amount: 100 + +deployment: + matomo: + profile: matomo-profile + count: 1 + mariadb: + profile: mariadb-profile + count: 1 +``` + +--- + +## **Step 3: Steps to Deploy** + +### **1. Customize the SDL** +- Replace `yourpassword` and `rootpassword` with secure passwords. +- Update the `region` under `placement` if required. +- Adjust `pricing` values to match your budget. + +### **2. Validate the SDL File** +Run the following command to validate the SDL file: +``` +akash deployment sdl validate deploy.yaml +``` + +### **3. Create the Deployment** +Create the deployment using: +``` +akash tx deployment create deploy.yaml --from --chain-id --node +``` + +### **4. Bid for Deployment** +Wait for providers to submit bids and select a provider: +``` +akash tx market lease create --dseq --oseq 1 --gseq 1 --from +``` + +### **5. Retrieve Lease Information** +After the lease is created, get the lease information: +``` +akash query market lease list --owner +``` + +### **6. Access Matomo** +Once deployed: +1. 
Retrieve the service URL or IP using: + ``` + akash provider service-status --provider --dseq --from + ``` +2. Configure your domain (if applicable) to point to the service's external IP. + +3. Visit the URL to complete Matomo setup. + +--- + +## **Step 4: Complete the Matomo Setup** +1. Navigate to the Matomo web interface using the URL or IP. +2. Follow the on-screen instructions to set up: + - Database details: + - Host: `mariadb` + - Database: `matomo` + - User: `matomo` + - Password: Your chosen password. +3. Finish the installation and start using Matomo. + +--- + +## **Optional Enhancements** +- **TLS/SSL**: Use tools like [Certbot](https://certbot.eff.org/) with an Nginx reverse proxy. +- **Backup**: Set up periodic backups for the database and configuration files. + +--- + +You have now successfully deployed Matomo on Akash Network! \ No newline at end of file diff --git a/src/content/Docs/guides/data-analysis/opensearch/index.md b/src/content/Docs/guides/data-analysis/opensearch/index.md new file mode 100644 index 00000000..da27dffa --- /dev/null +++ b/src/content/Docs/guides/data-analysis/opensearch/index.md @@ -0,0 +1,125 @@ +--- +categories: ["Guides"] +tags: ["Search"] +weight: 1 +title: "OpenSearch" +linkTitle: "OpenSearch" +--- + +Here’s a step-by-step guide to deploying OpenSearch using the `opensearchproject/opensearch` Docker image on Akash, a decentralized cloud computing platform. + +--- + +### **Step 1: Install Akash CLI** +1. Download and install the Akash CLI from the [Akash GitHub releases page](https://github.com/akash-network/node/releases). +2. Ensure you have an Akash wallet with sufficient funds for deployment. + +--- + +### **Step 2: Write the SDL Deployment File** +Create a file called `deploy.yaml` for your OpenSearch deployment. 
Here’s a sample SDL template: + +``` +--- +version: "2.0" + +services: + opensearch: + image: opensearchproject/opensearch:latest + env: + - discovery.type=single-node # For standalone mode + expose: + - port: 9200 + as: 9200 + to: + - global + - port: 9600 + as: 9600 + to: + - global + +profiles: + compute: + opensearch: + resources: + cpu: + units: 2 + memory: + size: 4Gi + storage: + size: 10Gi + placement: + akash: + pricing: + opensearch: + denom: uakt + amount: 100 + +deployment: + opensearch: + opensearch: + profile: opensearch + count: 1 + placement: + akash: + profile: akash + count: 1 +``` + +--- + +### **Step 3: Deploy the SDL File on Akash** +1. **Upload the SDL File:** + Run the following command to validate and create a deployment from your SDL file: + ``` + akash tx deployment create deploy.yaml --from --chain-id --node + ``` + +2. **Wait for Deployment Approval:** + Akash will broadcast the deployment transaction. Wait for a provider to accept your bid. + +3. **Lease the Deployment:** + Once a bid is accepted, lease the deployment by running: + ``` + akash tx market lease create --dseq --oseq 1 --gseq 1 --from + ``` + +4. **Query Lease Details:** + Use the following command to get the deployment’s IP address: + ``` + akash provider lease-status --dseq --oseq 1 --gseq 1 --provider + ``` + +--- + +### **Step 4: Access OpenSearch** +1. Once deployed, OpenSearch will be available at the provider's public IP and port (e.g., `http://:9200`). +2. Use tools like `curl` or any HTTP client to interact with the OpenSearch API: + ``` + curl -X GET "http://:9200/_cat/health?v" + ``` + +--- + +### **Step 5: Monitor OpenSearch** +OpenSearch exposes a monitoring endpoint at port 9600. Access metrics and logs using: +``` +curl -X GET "http://:9600/_nodes/stats" +``` + +--- + +### **Optional: Persist Data Using External Storage** +By default, Akash deployments are stateless, and data will be lost if the container is restarted. To persist data: +1. 
Modify the SDL to include an external volume: + ``` + services: + opensearch: + volumes: + - /data:/mnt/data/opensearch + ``` +2. Attach persistent storage using Akash’s storage solutions. + +--- + +This configuration deploys a single-node OpenSearch setup. For clustering, update the `discovery.type` and network configurations accordingly. Let me know if you'd like additional customization! \ No newline at end of file diff --git a/src/content/Docs/guides/data-viz/redash/index.md b/src/content/Docs/guides/data-analysis/redash/index.md similarity index 100% rename from src/content/Docs/guides/data-viz/redash/index.md rename to src/content/Docs/guides/data-analysis/redash/index.md diff --git a/src/content/Docs/guides/data-analysis/rstudio/index.md b/src/content/Docs/guides/data-analysis/rstudio/index.md new file mode 100644 index 00000000..81b99a58 --- /dev/null +++ b/src/content/Docs/guides/data-analysis/rstudio/index.md @@ -0,0 +1,93 @@ +--- +categories: ["Guides"] +tags: ["Data Analysis"] +weight: 1 +title: "RStudio" +linkTitle: "RStudio" +--- + +## Prerequisites + +1. **Akash Account**: Ensure you have an Akash account with sufficient AKT to deploy. +2. **Akash CLI**: Install the Akash CLI, which you'll use to interact with the Akash Network. +3. **SDL Template**: Ensure you have the SDL template you previously shared. + +## Step-by-Step Deployment + +1. **Modify the SDL Template for RStudio** + +Update your SDL template with the necessary configuration to deploy RStudio. 
Here’s an example of how the SDL template might look: + +``` +version: "2.0" + +services: + rstudio: + image: "rocker/rstudio" # RStudio's official Docker image + env: + - PASSWORD= + expose: + - port: 8787 # Default RStudio port + as: 80 # Map to a publicly accessible port if needed + proto: tcp + to: + - global: true + +profiles: + compute: + rstudio: + resources: + cpu: + units: 500m + memory: + size: 1Gi + storage: + size: 1Gi + + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - akash + pricing: + rstudio: + denom: uakt + amount: 100 # Adjust according to your budget and requirements + +deployment: + rstudio: + westcoast: + profile: rstudio + count: 1 +``` + +Replace `` with a secure password of your choice. This will be the password for the RStudio instance. + +2. **Deploy the SDL Template on Akash** + +- Save the modified SDL file (e.g., deploy-rstudio.yaml). + +- In your terminal, navigate to the directory containing this SDL file. + +- Run the following command to initialize the deployment: + +``` +akash tx deployment create deploy-rstudio.yaml --from --chain-id --node +``` + +3. **Monitor the Deployment** + +- Use Akash’s CLI commands to check the status of your deployment. +- Once it is deployed, note the external IP and port provided for the service. + +4. **Access RStudio** + +- Open a browser and navigate to http://:80. +- Log in with the username rstudio and the password you set earlier. + +## Additional Tips + +- **Storage and Memory**: You may need to adjust the `memory` and `storage` configurations in the SDL file depending on your workload. +- **Security**: For production deployments, consider securing the deployment with SSL or limiting access. 
\ No newline at end of file diff --git a/src/content/Docs/guides/data-analysis/superset/index.md b/src/content/Docs/guides/data-analysis/superset/index.md new file mode 100644 index 00000000..b3c13130 --- /dev/null +++ b/src/content/Docs/guides/data-analysis/superset/index.md @@ -0,0 +1,111 @@ +--- +categories: ["Guides"] +tags: ["Data Analysis"] +weight: 1 +title: "Apache Superset" +linkTitle: "Apache Superset" +--- + +To deploy Apache Superset on the Akash network, follow these steps: + +## Prerequisites + +1. Akash Wallet with sufficient funds. +2. Akash CLI installed on your system and configured. +3. YAML Configuration File for Apache Superset. Here’s an example configuration that you can customize. + +## 1. Create the YAML File for Apache Superset + +Create a file named `deploy.yaml` and configure it with the required specifications to deploy Apache Superset. This configuration will define the resources, Docker image, and other settings for the deployment. + +Here’s an example configuration: + +``` +--- +version: "2.0" + +services: + superset: + image: apache/superset:latest + expose: + - port: 8088 + as: 80 + to: + - global: true + env: + SUPERSET_ENV: production + SUPERSET_SECRET_KEY: + DATABASE_URL: # Replace with your database URL + args: + - "--timeout 60" + +profiles: + compute: + superset: + resources: + cpu: + units: 0.5 + memory: + size: 1Gi + storage: + size: 5Gi + + placement: + akash: + pricing: + superset: + denom: uakt + amount: 100 # Set the price you are willing to pay + +deployment: + superset: + superset: + profile: + compute: superset + placement: akash + count: 1 +``` + +## 2. Customize the YAML Configuration + +Update the following placeholders in the `deploy.yaml` file: + + - **SUPERSET_SECRET_KEY**: Replace `` with a secure, random key. + - **DATABASE_URL**: Replace `` with the database URL for Superset’s metadata (e.g., PostgreSQL, MySQL). + +## 3. Deploy Apache Superset on Akash + +1. 
Initialize the deployment by creating an Akash deployment with the deploy.yml file: + +``` +akash tx deployment create deploy.yml --from --chain-id --node +``` + +2. Verify the deployment status to check if it is live: + +``` +akash query deployment list --owner +``` + +3. Find and accept a bid by checking available bids for your deployment and accepting one: + +``` +akash query market bid list --owner +# Accept a bid +akash tx market lease create --from --dseq --oseq --gseq --provider +``` + +4. Retrieve the lease endpoint to access your Apache Superset instance: + + ``` + akash query market lease status --owner --provider --dseq --gseq --oseq + ``` +The endpoint will provide the public IP or domain where you can access your Apache Superset instance. Navigate to this address in your web browser to start using Apache Superset. + +## Additional Configuration + +- **Persistent Storage**: If needed, adjust the storage size in the compute section. +- **Scaling**: Adjust the count value under deployment to scale horizontally if needed. + +This setup will run Apache Superset on Akash using minimal resources, but you may scale up CPU, memory, and storage based on your requirements. 
+ diff --git a/src/content/Docs/guides/data-viz/metabase/index.md b/src/content/Docs/guides/data-viz/metabase/index.md deleted file mode 100644 index 279e00a2..00000000 --- a/src/content/Docs/guides/data-viz/metabase/index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -categories: ["Guides"] -tags: ["Data Visualization"] -weight: 1 -title: "Metabase" -linkTitle: "Metabase" ---- diff --git a/src/content/Docs/guides/databases/anzograph/index.md b/src/content/Docs/guides/databases/anzograph/index.md new file mode 100644 index 00000000..71b87d65 --- /dev/null +++ b/src/content/Docs/guides/databases/anzograph/index.md @@ -0,0 +1,138 @@ +--- +categories: ["Guides"] +tags: ["Database", "SPARQL", "Graph",] +weight: 1 +title: "Deploying AnzoGraph on Akash" +linkTitle: "AnzoGraph" +--- + + +AnzoGraph is a high-performance graph database designed for advanced analytics and querying of connected data. It supports SPARQL and various graph-based analytics, making it ideal for use cases in data integration, knowledge graphs, and semantic reasoning. With its scalability and in-memory processing, it is tailored for large-scale enterprise data workloads. By deploying AnzoGraph on Akash, you leverage decentralized, cost-effective cloud infrastructure for hosting. + +### **Why Deploy on Akash?** +- **Cost Efficiency:** Pay-as-you-go decentralized compute power. +- **Scalability:** Scale resources easily as your graph data and queries grow. +- **Decentralization:** Avoid vendor lock-in with a blockchain-based cloud platform. + +--- + +## **Steps to Deploy AnzoGraph on Akash** + +### **1. Prerequisites** +- **Akash Wallet**: Ensure you have an Akash wallet with sufficient $AKT tokens for deployment. +- **Akash CLI**: Install and configure the Akash CLI for interacting with the network. +- **SDL Template**: Prepare a manifest file to define your deployment requirements. +- **Docker Image**: Use the official Docker image `cambridgesemantics/anzograph`. + +### **2. 
Create the SDL Manifest File** +The SDL file describes the deployment configuration for Akash. Below is an example: + +``` +--- +version: "2.0" + +services: + anzograph: + image: cambridgesemantics/anzograph:latest + expose: + - port: 8080 + as: 80 + to: + - global: true + - port: 9000 + as: 9000 + to: + - global: true + env: + - ANZOGRAPH_LICENSE_ACCEPTED=true # Required to accept the AnzoGraph license + resources: + cpu: + units: 1.0 + memory: + size: 2Gi + storage: + size: 10Gi + +profiles: + compute: + anzograph-profile: + resources: + cpu: + units: 1.0 + memory: + size: 2Gi + storage: + size: 10Gi + + placement: + default: + attributes: + host: akash + signedBy: + anyOf: + - akash + pricing: + anzograph-profile: + denom: uakt + amount: 100 + +deployment: + anzograph-deployment: + profiles: + compute: anzograph-profile + placement: default + services: + - anzograph +``` + +### **3. Steps to Deploy** + +1. **Validate SDL File**: + Ensure the syntax is correct by running: + ```bash + akash tx deployment create deploy.yaml --from + ``` + +2. **Upload the SDL File**: + Deploy the service using: + ```bash + akash deployment create deploy.yaml + ``` + +3. **Monitor Deployment**: + Track the deployment status: + ```bash + akash query deployment get --owner --dseq + ``` + +4. **Access AnzoGraph**: + Once the deployment is successful, you will be provided with the service endpoint. Access AnzoGraph's admin interface or SPARQL endpoint via the exposed ports (default `80` and `9000`). + +--- + +## **Post-Deployment Configuration** +1. **Verify Installation**: + - Open your browser and navigate to `http://`. + - Confirm the AnzoGraph interface is accessible. + +2. **Load Data**: + - Use the SPARQL endpoint to load your graph datasets. + - Example: + ```sparql + LOAD + ``` + +3. **Run Queries**: + - Start running SPARQL queries to analyze your graph data. + +4. **Monitor Resources**: + - Regularly check resource usage (CPU, memory) through Akash or AnzoGraph's interface. 
+ +--- + +## **Tips for Optimal Performance** +- **Scale Resources**: Adjust the `cpu`, `memory`, and `storage` in the SDL manifest based on your workload. +- **Persistent Storage**: Use external storage solutions if you need persistent data across deployments. +- **Networking**: Secure access by setting up specific firewalls or access rules. + +By deploying AnzoGraph on Akash, you combine the power of advanced graph analytics with a decentralized, cost-effective infrastructure. \ No newline at end of file diff --git a/src/content/Docs/guides/databases/arangodb/index.md b/src/content/Docs/guides/databases/arangodb/index.md new file mode 100644 index 00000000..b3fc2eaa --- /dev/null +++ b/src/content/Docs/guides/databases/arangodb/index.md @@ -0,0 +1,135 @@ +--- +categories: ["Guides"] +tags: ["Database", "SQL", "Version Control"] +weight: 1 +title: "Guide to Deploy ArangoDB on Akash Network" +linkTitle: "ArangoDB" +--- + + +##**Overview of ArangoDB** + +ArangoDB is a powerful, multi-model database that supports key-value, document, and graph data models. It is open-source and designed for scalability, high performance, and flexibility. Its capabilities include: + +- **Multi-Model Database**: Combines key-value, document, and graph database functionalities in a single engine. +- **AQL (ArangoDB Query Language)**: A highly expressive SQL-like query language. +- **High Availability**: Supports replication and sharding for distributed setups. +- **Built-in Graph Processing**: Ideal for applications requiring relationship analysis. + +Deploying ArangoDB on **Akash Network**, a decentralized cloud platform, leverages its cost efficiency and distributed infrastructure to host a resilient and scalable database setup. 
+ +--- + +## **Sample SDL for ArangoDB Deployment on Akash** + +Here is an example SDL (Stack Definition Language) file to deploy ArangoDB on the Akash Network: + +```yaml +--- +version: "2.0" + +services: + arangodb: + image: arangodb:latest + env: + - ARANGO_ROOT_PASSWORD=changeme # Set the root password + expose: + - port: 8529 + as: 80 + to: + - global + +profiles: + compute: + arangodb: + resources: + cpu: + units: 0.5 + memory: + size: 1Gi + storage: + size: 5Gi + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - akash1qqlx6... # Replace with the provider's address + pricing: + arangodb: + denom: uakt + amount: 100 + +deployment: + arangodb: + westcoast: + profile: arangodb + count: 1 +``` + +--- + +## **Steps to Deploy ArangoDB on Akash** + +1. **Install Akash CLI** + Ensure you have the Akash CLI installed and configured on your machine. Follow the [official Akash CLI installation guide](#) if you haven’t already. + +2. **Create the SDL File** + Copy the above SDL into a file named `deploy.yml`. Customize the environment variables, such as `ARANGO_ROOT_PASSWORD`, and ensure the `placement` section reflects the regions and providers you wish to deploy to. + +3. **Validate the SDL** + Run the following command to ensure your SDL file is valid: + ```bash + akash validate deploy.yml + ``` + +4. **Send the Deployment to Akash** + Deploy your application by creating a deployment: + ```bash + akash tx deployment create deploy.yml --from --chain-id --node + ``` + Replace ``, ``, and `` with your Akash account details. + +5. **Bid Acceptance** + Monitor bids for your deployment and accept a provider’s bid: + ```bash + akash query market bid list --owner + akash tx market lease create --owner --dseq --from + ``` + +6. 
**Access Your Deployment** + After accepting the bid and starting the deployment, retrieve the deployment's endpoint: + ```bash + akash query deployment get + ``` + Access ArangoDB via the public endpoint on port `80` (or the port you specified in the SDL). + +7. **Secure Your Database** + - Use a strong password for `ARANGO_ROOT_PASSWORD`. + - Restrict access by deploying a reverse proxy or firewall configuration to limit exposure of the database. + +--- + +## **Scaling and Customization** + +To scale your deployment: +- Increase the `count` value in the deployment section for multiple instances. +- Adjust `cpu`, `memory`, and `storage` resources under the `compute` profile to meet your application's requirements. + +--- + +## **Troubleshooting** + +1. **Logs**: Retrieve logs from your deployment: + ```bash + akash tx deployment logs + ``` + +2. **Issues with Bids**: Ensure sufficient funds are deposited into your Akash wallet. + +3. **Connectivity Issues**: Check if your deployment is correctly exposing ports and if the provider is reachable. + +--- + +By deploying ArangoDB on Akash, you can utilize decentralized cloud infrastructure to build highly available and scalable applications at a fraction of the cost of traditional cloud providers. \ No newline at end of file diff --git a/src/content/Docs/guides/databases/cockroachdb/index.md b/src/content/Docs/guides/databases/cockroachdb/index.md new file mode 100644 index 00000000..8857a8b0 --- /dev/null +++ b/src/content/Docs/guides/databases/cockroachdb/index.md @@ -0,0 +1,126 @@ +--- +categories: ["Guides"] +tags: ["Database", ] +weight: 1 +title: "Guide to Deploy CockcroachDB on Akash " +linkTitle: "CockcroachDB" +--- + +## Deploying CockroachDB on Akash Network + +This guide demonstrates how to deploy **CockroachDB**, a cloud-native distributed SQL database, on the **Akash Network**. Akash is a decentralized cloud computing platform where you can host containerized applications. + +--- + +## Prerequisites +1. 
**Akash CLI**: Installed and configured with your wallet and access to testnet/mainnet. +2. **Docker**: To create a container for CockroachDB. +3. **CockroachDB Docker Image**: Official image from `cockroachdb/cockroach`. +4. **Akash SDL Template**: A pre-configured SDL file for deployment. + +--- + +## Steps + +### 1. Prepare the CockroachDB Deployment +CockroachDB can be run as a standalone instance or as a cluster. For this guide, we will deploy a single-node instance. + +### 2. Write the SDL File +Create a file named `deploy.yaml` and add the following SDL configuration: + +``` +version: "2.0" + +services: + cockroachdb: + image: cockroachdb/cockroach:v23.1.1 # Replace with the desired version + expose: + - port: 26257 + as: 26257 + to: + - global: true # Expose to the public internet + - port: 8080 + as: 8080 + to: + - global: true + args: + - start-single-node + - --insecure # For simplicity, remove this for production use + +profiles: + compute: + cockroachdb: + resources: + cpu: + units: 2 + memory: + size: 2Gi + storage: + size: 10Gi + + placement: + cockroachdb: + pricing: + cockroachdb: + denom: uakt + amount: 100 # Specify your budget + +deployment: + cockroachdb: + cockroachdb: + profile: cockroachdb + count: 1 +``` + +### Explanation of the SDL File: +- **`image`**: The Docker image for CockroachDB. +- **`expose`**: Ports 26257 (SQL) and 8080 (Web UI) are exposed for access. +- **`args`**: Starts a single-node CockroachDB instance in insecure mode for simplicity. +- **`resources`**: Configures compute, memory, and storage requirements. +- **`pricing`**: Sets the budget in `uakt` (Akash tokens). + +--- + +### 3. Deploy the SDL File +1. Open a terminal and deploy the SDL file using the Akash CLI: + ``` + akash tx deployment create deploy.yaml --from --node https://rpc.akash.network --chain-id + ``` +2. Wait for the deployment to initialize. You can monitor the status with: + ``` + akash query deployment list --owner + ``` + +### 4. 
Access CockroachDB +- **SQL Port**: Access the SQL interface on `26257`. +- **Web UI**: Access the CockroachDB web UI on `8080` using the Akash-leased hostname. + + Example: + ``` + http://:8080 + ``` + +--- + +### 5. Test and Verify +1. Use the `cockroach sql` command to connect: + ``` + cockroach sql --url "postgresql://:26257?sslmode=disable" + ``` +2. Use the Web UI to verify that the database is running. + +--- + +### 6. (Optional) Scaling to a Cluster +To deploy a multi-node CockroachDB cluster: +1. Adjust the SDL file to add additional nodes and define `--join` arguments for cluster setup. +2. Use a shared network for nodes. + +--- + +### Notes +- Replace `--insecure` with SSL certificates for production deployments. +- Monitor resources to ensure your Akash deployment meets CockroachDB’s performance requirements. +- Use persistent storage configurations if required. + +This deployment method leverages CockroachDB's containerized image and Akash's decentralized cloud for a cost-effective and distributed database solution. \ No newline at end of file diff --git a/src/content/Docs/guides/databases/couchbase/index.md b/src/content/Docs/guides/databases/couchbase/index.md new file mode 100644 index 00000000..c32cfba9 --- /dev/null +++ b/src/content/Docs/guides/databases/couchbase/index.md @@ -0,0 +1,164 @@ +--- +categories: ["Guides"] +tags: ["Database", "NoSQL"] +weight: 1 +title: "Couchbase" +linkTitle: "Couchbase" +--- + + + + +Couchbase is a distributed NoSQL database designed for interactive applications. It combines the benefits of a memory-first architecture, rich query capabilities, and a powerful indexing engine. Couchbase is often used for applications requiring high availability, scalability, and performance. + +**Key Features of Couchbase:** +- **JSON Data Storage:** Flexible schema to adapt to dynamic application needs. +- **N1QL Query Language:** SQL-like query syntax for JSON data. 
+- **Memory-First Architecture:** Ensures low latency and high throughput. +- **Distributed Architecture:** Supports horizontal scaling. +- **Full-Text Search:** Built-in capabilities for advanced text-based queries. +- **Cross-Data Center Replication (XDCR):** Enables data replication across geographically distributed clusters. + +--- + +## **Deploying Couchbase on Akash** + +Akash is a decentralized cloud computing platform that allows developers to deploy containerized applications at a lower cost than traditional cloud providers. Here's a step-by-step guide to deploy Couchbase on Akash using the `couchbase` Docker image. + +--- + +### **1. Prepare the Deployment Environment** + +1. **Install Akash CLI:** + Follow the [official Akash CLI installation guide](docs/deployments/akash-cli/overview/) to set up your environment. + +2. **Fund Your Wallet:** + Ensure your Akash wallet is funded with sufficient AKT tokens to pay for the deployment. + +3. **Create the Deployment Directory:** + Set up a directory on your local machine to hold the deployment files. + +--- + +### **2. Create the SDL File** + +The SDL (Stack Definition Language) file specifies how Couchbase should be deployed on the Akash network. 
+ +Below is an example SDL file for deploying Couchbase: + +``` +--- +version: "2.0" + +services: + couchbase: + image: couchbase + expose: + - port: 8091 + as: 8091 + to: + - global + - port: 8092 + as: 8092 + to: + - global + - port: 11210 + as: 11210 + to: + - global + - port: 11211 + as: 11211 + to: + - global + env: + - CB_USERNAME=admin # Default Couchbase username + - CB_PASSWORD=password # Default Couchbase password + resources: + cpu: + units: 1 # CPU units + memory: + size: 2Gi # Memory allocation + storage: + size: 10Gi # Disk storage + +profiles: + compute: + couchbase-profile: + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 10Gi + placement: + couchbase-placement: + attributes: + host: akash + signedBy: + anyOf: + - akash + pricing: + couchbase-pricing: + denom: uakt + amount: 1000 + +deployment: + couchbase-deployment: + profile: couchbase-profile + count: 1 +``` + +--- + +### **3. Deploy the SDL File** + +1. **Validate the SDL File:** + Run the following command to ensure your SDL file is correctly formatted: + ``` + akash tx deployment validate + ``` + +2. **Create the Deployment:** + Deploy the Couchbase service: + ``` + akash tx deployment create --from --chain-id --node + ``` + +3. **Bid and Lease Management:** + - Wait for providers to bid on your deployment. + - Select a provider and create a lease. + +4. **Access Your Couchbase Service:** + Once the lease is created, you'll receive the endpoint information. Use it to access Couchbase's web UI on `http://:8091`. + +--- + +### **4. Configure Couchbase** + +1. **Access the Admin UI:** + - Navigate to the Couchbase web interface using the URL provided. + - Login using the credentials (`CB_USERNAME` and `CB_PASSWORD`) defined in the SDL file. + +2. **Set Up Buckets:** + - Buckets in Couchbase are similar to databases in relational systems. + - Create buckets for your application needs. + +3. 
**Connect Your Application:** + - Use Couchbase SDKs or drivers to connect your application to the deployed Couchbase instance. + +--- + +### **5. Monitor and Scale** + +- **Monitor Usage:** + Use the Couchbase admin UI to monitor resource usage and performance. + +- **Scale the Deployment:** + Update the `count` field in the SDL file to scale the Couchbase deployment horizontally and redeploy. + +--- + +## **Conclusion** + +Deploying Couchbase on Akash provides a cost-efficient and scalable solution for distributed applications. By leveraging Akash’s decentralized infrastructure and Couchbase’s advanced database capabilities, you can build high-performance, scalable systems with minimal overhead. \ No newline at end of file diff --git a/src/content/Docs/guides/databases/influxdb/index.md b/src/content/Docs/guides/databases/influxdb/index.md new file mode 100644 index 00000000..fbe1424c --- /dev/null +++ b/src/content/Docs/guides/databases/influxdb/index.md @@ -0,0 +1,147 @@ +--- +categories: ["Guides"] +tags: ["Database", "SQL", "Version Control"] +weight: 1 +title: "Deploying InfluxDB on Akash" +linkTitle: "InfluxDB" +--- + + + +This guide will help you deploy **InfluxDB** on the Akash Network using the official InfluxDB Docker image. + +--- +## **Overview of InfluxDB** + +**InfluxDB** is a high-performance time-series database designed to handle large amounts of data generated by IoT devices, DevOps monitoring, and analytics applications. It is used for storing, querying, and visualizing metrics and events over time. Key features include: + +- **Time-series data storage**: Efficiently stores and retrieves time-stamped data. +- **Powerful query language**: Supports InfluxQL or Flux for querying data. +- **Data visualization**: Integrates with tools like Grafana or its native UI. +- **High availability**: Scalable for production-level deployments. + +## **Requirements** + +1. **Akash CLI**: Installed and configured. +2. 
**Docker**: Installed locally for creating/testing deployments (optional). +3. **An Akash Wallet**: Funded with AKT tokens. +4. **SDL Template**: For deploying the InfluxDB Docker container. + +--- + +## **Steps to Deploy InfluxDB on Akash** + +### **1. Prepare the SDL File** + +The **SDL (Service Definition Language)** file specifies the configuration for deploying the InfluxDB Docker container. Below is an example SDL file for deploying InfluxDB: + +``` +--- +version: "2.0" + +services: + influxdb: + image: influxdb:latest + expose: + - port: 8086 + as: 80 + to: + - global: true + env: + - INFLUXDB_DB=my_database + - INFLUXDB_ADMIN_USER=admin + - INFLUXDB_ADMIN_PASSWORD=admin_password + - INFLUXDB_USER=my_user + - INFLUXDB_USER_PASSWORD=my_user_password + +profiles: + compute: + influxdb: + resources: + cpu: 0.5 + memory: 512Mi + storage: 1Gi + placement: + akash: + attributes: + region: us-west + pricing: + influxdb: + denom: uakt + amount: 100 + +deployment: + influxdb: + influxdb: + profile: influxdb + count: 1 +``` +Save it as `deploy.yaml`. + +### **2. Customize Your Deployment** + +- **Database Configuration**: + - Update the environment variables (e.g., `INFLUXDB_ADMIN_PASSWORD`, `INFLUXDB_USER_PASSWORD`) with your preferred credentials. +- **Resource Allocation**: + - Adjust `cpu`, `memory`, and `storage` in the `compute` profile to match your application requirements. +- **Region and Pricing**: + - Set the `region` to your preferred Akash network location and pricing as needed. + +### **3. Deploy on Akash** + +1. **Validate the SDL File**: + Run the following command to validate the SDL file: + ```bash + akash tx deployment create deployment.yml --from --chain-id --node + ``` + +2. **Wait for Bidding**: + After deploying the SDL file, Akash providers will bid on your deployment. Accept a suitable bid. + +3. **Get the Lease**: + Once the bid is accepted, obtain the lease for your deployment: + ```bash + akash query market lease list --owner + ``` + +4. 
**Access the Deployment**: + Retrieve the external IP or URL assigned to your service. The InfluxDB API/UI will be accessible on port **80** or the domain configured by the Akash provider. + +### **4. Verify the Deployment** + +1. **Access InfluxDB**: + Open the assigned domain or IP in a web browser or use `curl`: + ```bash + curl http://<your-deployment-url>:80/ping + ``` + If InfluxDB is running, it will return a `204 No Content` response. + +2. **Login to the Admin UI**: + Navigate to `http://<your-deployment-url>:80` in a browser. Use the credentials set in the environment variables (e.g., `INFLUXDB_ADMIN_USER` and `INFLUXDB_ADMIN_PASSWORD`). + +### **5. Connect Your Applications** + +Use the InfluxDB URL to connect applications or tools like Grafana for visualizing your data. Example of connecting using InfluxQL: +```bash +curl -XPOST 'http://<your-deployment-url>:80/query' \ +-d 'q=CREATE DATABASE my_database' \ +--user admin:admin_password +``` + +--- + +## **Key Notes** + +1. **Persistent Storage**: + - Akash provides persistent storage only for the duration of the lease. Review the [documentation](docs/network-features/persistent-storage/) for a further explanation of the limitations. + +2. **Scaling**: + - Modify the `count` parameter in the `deployment` section to scale horizontally. + +3. **Security**: + - Always use strong passwords for your database and user accounts. + - Use a firewall or restrict access to specific IPs for production environments. + +--- + +By following these steps, you'll have InfluxDB running on Akash, providing a scalable, decentralized solution for your time-series data needs. 
\ No newline at end of file diff --git a/src/content/Docs/guides/databases/mariadb/index.md b/src/content/Docs/guides/databases/mariadb/index.md new file mode 100644 index 00000000..3afc2792 --- /dev/null +++ b/src/content/Docs/guides/databases/mariadb/index.md @@ -0,0 +1,111 @@ +--- +categories: ["Guides"] +tags: ["Database", "SQL",] +weight: 1 +title: "MariaDB" +linkTitle: "MariaDB" +--- + +MariaDB is a popular open-source relational database management system (RDBMS) that is a fork of MySQL. It is designed for scalability, high performance, and robust data security. MariaDB supports a wide range of storage engines and features SQL compliance, making it suitable for modern application development. + +### **Key Features of MariaDB** +1. **High Performance:** Optimized for speed and scalability. +2. **Open Source:** Free to use and customize. +3. **Cross-Platform:** Compatible with various operating systems and cloud platforms. +4. **Compatibility:** Seamless migration from MySQL. +5. **Scalability:** Suitable for small projects to large-scale enterprise applications. + +--- + +## **Steps to Deploy MariaDB on Akash** + +### **1. Prepare Your Environment** +Ensure you have: +- **Akash CLI** installed and configured. +- A funded wallet with AKT tokens for deploying your workload. +- A domain or IP to access your MariaDB instance, if required. + +### **2. Create the SDL (Service Definition Language) File** +The SDL defines your MariaDB deployment's resource requirements, container image, and other configurations. 
+ +Here’s a sample SDL file for deploying MariaDB on Akash: + +```yaml +--- +version: "2.0" + +services: + mariadb: + image: mariadb:10.6 + env: + - MYSQL_ROOT_PASSWORD=yourpassword + - MYSQL_DATABASE=mydatabase + - MYSQL_USER=myuser + - MYSQL_PASSWORD=mypassword + expose: + - port: 3306 + as: 3306 + to: + - global + +profiles: + compute: + mariadb: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 5Gi + + placement: + global: + pricing: + mariadb: + denom: uakt + amount: 100 + +deployment: + mariadb: + mariadb: + profile: mariadb + count: 1 +``` + +### **3. Customize the SDL** +- Replace `yourpassword`, `mydatabase`, `myuser`, and `mypassword` with your desired credentials. +- Adjust the `resources` (CPU, memory, and storage) based on your workload requirements. +- Set `amount` in the `pricing` section according to your budget. + +### **4. Deploy to Akash** +1. **Submit the SDL file to Akash**: + ```bash + akash tx deployment create deploy.yaml --from --node --chain-id + ``` +2. **Wait for deployment approval**. Once approved, note the lease ID. + +3. **Access the logs** to verify MariaDB is running: + ```bash + akash provider lease-logs --node --chain-id --dseq --gseq --oseq + ``` + +4. **Retrieve the public IP/endpoint** of the deployment for connecting to the database: + ```bash + akash provider lease-status --node --chain-id --dseq + ``` + +### **5. Connect to MariaDB** +Use a MariaDB client or your application to connect to the database instance. The connection string will look like: +```bash +mysql -u myuser -p -h -P 3306 +``` + +--- + +## **Best Practices** +1. **Security**: Use strong passwords and configure firewall rules to restrict database access. +2. **Backup**: Regularly back up your database to ensure data durability. +3. **Scaling**: Monitor resource usage and scale up resources in the SDL as needed. + +This guide provides a streamlined process for deploying MariaDB on Akash. 
With the SDL template, you can easily adapt the configuration for your specific needs. \ No newline at end of file diff --git a/src/content/Docs/guides/databases/mongodb/index.md b/src/content/Docs/guides/databases/mongodb/index.md new file mode 100644 index 00000000..17b09cef --- /dev/null +++ b/src/content/Docs/guides/databases/mongodb/index.md @@ -0,0 +1,110 @@ +--- +categories: ["Guides"] +tags: ["Database", "NoSQL",] +weight: 1 +title: "MongoDB" +linkTitle: "MongoDB" +--- + +MongoDB is a leading NoSQL database that offers high performance, high availability, and automatic scaling. It stores data in flexible, JSON-like documents, making it an excellent choice for modern applications requiring dynamic, hierarchical, or large-scale data management. With MongoDB, developers can leverage its schema-less nature to adapt quickly to changing application requirements. + +**Key Features of MongoDB:** +- **Flexible Schema:** Store data in JSON-like documents for easier data modeling. +- **Horizontal Scalability:** Scale your application with built-in sharding. +- **High Availability:** Built-in replication ensures redundancy and failover support. +- **Rich Query Language:** Perform complex queries, aggregations, and more. +- **Wide Use Cases:** Ideal for applications in e-commerce, IoT, gaming, and beyond. + +By deploying MongoDB on Akash, a decentralized cloud computing platform, users gain the advantage of cost-effective, censorship-resistant, and resilient database hosting. + +--- + +## **Deploying MongoDB on Akash** + +To deploy MongoDB on Akash, you need to define the deployment parameters in an SDL (Stack Definition Language) file. Below is a step-by-step guide: + +### **Step 1: Install Akash CLI** + +1. Install the Akash CLI on your local machine. + ```bash + curl https://raw.githubusercontent.com/ovrclk/akash/master/godownloader.sh | sh + ``` +2. Initialize your Akash wallet and fund it with the required AKT tokens. 
+ +### **Step 2: Prepare MongoDB SDL File** + +Here’s a sample SDL file for deploying MongoDB: + +``` +version: "2.0" +services: + mongodb: + image: mongo:latest + env: + - MONGO_INITDB_ROOT_USERNAME=admin + - MONGO_INITDB_ROOT_PASSWORD=securepassword + expose: + - port: 27017 + as: 27017 + to: + - global: true + protocol: TCP +profiles: + compute: + mongodb: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 5Gi + placement: + westcoast: + attributes: + region: us-west + pricing: + mongodb: + denom: uakt + amount: 1000 +deployment: + mongodb: + westcoast: + profile: mongodb + count: 1 +``` + +### **Step 3: Deploy the SDL on Akash** + +1. Save the SDL file as `deploy.yaml`. +2. Use the Akash CLI to create a deployment: + ```bash + akash tx deployment create deploy.yaml --from --node --chain-id + ``` +3. Wait for the deployment to be approved and available. + +### **Step 4: Verify the Deployment** + +1. Retrieve the service endpoint using: + ```bash + akash query deployment get + ``` +2. Connect to MongoDB using a MongoDB client or CLI: + ```bash + mongo --host --port 27017 -u admin -p securepassword + ``` + +--- + +## **Advanced Configuration** + +- **Data Persistence:** To persist data, consider mounting persistent storage. Modify the SDL to include volume bindings for the `/data/db` directory. +- **Scaling:** Adjust the compute resources and `count` to scale MongoDB for high-traffic applications. +- **Networking:** Use Akash’s reverse proxy or external load balancers to manage connections securely. + +--- + +## **Conclusion** + +By deploying MongoDB on Akash, you can achieve a cost-effective, decentralized, and scalable database solution for your application. With the flexibility of Akash and the powerful capabilities of MongoDB, you are well-equipped to handle modern data requirements while leveraging decentralized cloud infrastructure. Ensure to monitor the deployment’s performance and optimize resources for cost efficiency. 
+ diff --git a/src/content/Docs/guides/databases/neo4j/index.md b/src/content/Docs/guides/databases/neo4j/index.md new file mode 100644 index 00000000..47305d15 --- /dev/null +++ b/src/content/Docs/guides/databases/neo4j/index.md @@ -0,0 +1,126 @@ +--- +categories: ["Guides"] +tags: ["Database", "Graph",] +weight: 1 +title: "Guide to Deploy Neo4j on Akash " +linkTitle: "Neo4j" +--- + + + +This guide will walk you through deploying Neo4j on Akash, a decentralized cloud computing platform, using the official Neo4j Docker image. + +### **Prerequisites** +1. Akash CLI installed and configured with your wallet. +2. A funded wallet with sufficient AKT to cover deployment costs. +3. Basic knowledge of Akash and Docker. + +--- + +## **Step 1: Prepare the SDL File** + +Save the following content as `deploy.yaml`: + +``` +--- +version: "2.0" + +services: + neo4j: + image: neo4j:latest + env: + - NEO4J_AUTH=neo4j/testpassword # Replace with a secure password + expose: + - port: 7474 + as: 80 + to: + - global + - port: 7687 + as: 7687 + to: + - global + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 5Gi + +profiles: + compute: + neo4j: + placement: + cloud: any + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 5Gi + + placement: + akash: + attributes: + host: akash + signedBy: + anyOf: + - "akash.network" + pricing: + neo4j: + denom: uakt + amount: 100 # Adjust based on your budget + +deployment: + neo4j: + neo4j: + profile: neo4j + count: 1 +``` + +--- + +## **Step 2: Deploy on Akash** + +1. **Create the deployment:** + Run the following command to create a deployment from the `deploy.yaml` file: + ``` + akash tx deployment create deploy.yaml --from --node --chain-id + ``` + +2. **Monitor the deployment:** + After the deployment is created, query its status: + ``` + akash query deployment list --owner + ``` + +3. **Bid on the deployment:** + Wait for providers to bid on your deployment. 
Accept a bid with: + ``` + akash tx market lease create --owner --dseq --from + ``` + +4. **Get the service endpoint:** + Once the lease is created, retrieve the service endpoint using: + ``` + akash query market lease-status --owner --dseq --node + ``` + +--- + +## **Step 3: Access Neo4j** + +1. Open a browser and navigate to `http://` for the Neo4j UI. +2. Connect to the Bolt protocol using port `7687` if required for programmatic access. + +--- + +## **Step 4: Secure Your Deployment** + +1. Replace `NEO4J_AUTH=neo4j/testpassword` in the SDL file with a secure password. +2. Use environment variables to securely manage sensitive credentials. + +--- + +This guide should get you up and running with Neo4j on Akash. \ No newline at end of file diff --git a/src/content/Docs/guides/databases/orientdb/index.md b/src/content/Docs/guides/databases/orientdb/index.md new file mode 100644 index 00000000..1a8b328c --- /dev/null +++ b/src/content/Docs/guides/databases/orientdb/index.md @@ -0,0 +1,154 @@ +--- +categories: ["Guides"] +tags: ["Database", "NoSQL",] +weight: 1 +title: "Deploying OrientDB on Akash: A Comprehensive Guide" +linkTitle: "OrientDB" +--- + + +## **Overview of OrientDB** +OrientDB is an open-source, multi-model NoSQL database that combines the benefits of graph and document databases. It is designed to handle large-scale data with flexibility and efficiency, supporting ACID transactions and various querying capabilities. OrientDB is ideal for applications such as social networking, content management, and fraud detection, where relationships and data interconnectivity are critical. + +### **Key Features of OrientDB** +1. **Multi-Model Support**: Offers both graph and document database capabilities. +2. **SQL-Like Query Language**: Provides SQL-like syntax for easy querying. +3. **Scalability**: Handles large datasets with support for sharding and replication. +4. **ACID Compliance**: Ensures reliable transactions. +5. 
**Open Source**: Freely available with a community and enterprise edition. + +## **What is Akash?** +Akash is a decentralized cloud computing marketplace that allows developers to deploy and manage applications on a distributed network of providers at a lower cost compared to traditional cloud services. Akash uses **SDL (Stack Definition Language)** files to define application configurations for deployment. + +--- + +## **Step-by-Step Guide to Deploy OrientDB on Akash** + +### **Prerequisites** +1. Install the Akash CLI and set up your wallet. +2. Ensure you have AKT tokens for deployment fees. +3. Obtain the Akash provider endpoint. +4. Download OrientDB (Community Edition) or use its Docker image. + +--- + +### **1. Create the SDL File** +The SDL file defines your deployment configuration for OrientDB on Akash. Here's a sample `deploy.yaml` file: + +``` +version: "2.0" + +services: + orientdb: + image: orientdb:latest + env: + - ORIENTDB_ROOT_PASSWORD=yourpassword # Replace 'yourpassword' with a secure password + expose: + - port: 2424 # Binary Protocol Port + as: 2424 + accept: true + to: + - global + - port: 2480 # HTTP/REST API Port + as: 2480 + accept: true + to: + - global + +profiles: + compute: + orientdb: + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 10Gi + placement: + default: + attributes: + host: akash + signedBy: + anyOf: + - akash.network + pricing: + orientdb: + denom: uakt + amount: 100 + +deployment: + orientdb: + orientdb: + profile: orientdb + count: 1 +``` + +--- + +### **2. Steps to Deploy OrientDB** + +#### **Step 1: Deploy the SDL File** +1. Save the `deploy.yaml` file in your local directory. +2. Deploy it using the Akash CLI: + + ```bash + akash tx deployment create deploy.yaml --from --node + ``` + +#### **Step 2: Wait for the Deployment** +- After submitting, wait for the deployment to propagate across the network. 
+- Confirm the deployment status with: + + ```bash + akash query deployment list --owner + ``` + +#### **Step 3: Accept a Bid** +- Once providers offer their bids, accept a bid to proceed: + + ```bash + akash tx market lease create --dseq --gseq --oseq --from + ``` + +#### **Step 4: Access the Application** +- After the deployment is active, retrieve the lease details: + + ```bash + akash query market lease list --owner + ``` + +- Access OrientDB using its exposed endpoints: + - Binary Protocol: `tcp://:2424` + - HTTP/REST API: `http://:2480` + +--- + +### **3. Verifying OrientDB Deployment** +1. Use the OrientDB Web Console: + - Navigate to `http://:2480`. + - Log in with `root` as the username and the password specified in the SDL file. + +2. Test OrientDB with a sample database: + - Create a new database using the Web Console. + - Run queries to verify functionality. + +--- + +### **4. Additional Notes** +- **Scaling**: Modify the `count` in the deployment profile to scale horizontally. +- **Monitoring**: Use Akash’s logs and metrics tools to monitor your OrientDB instance. +- **Persistence**: Add volume mounts for data persistence in the SDL file if required. + +--- + +### **5. Cleanup** +To delete the deployment and free resources: + +```bash +akash tx deployment close --owner --dseq --from +``` + +--- + +By following this guide, you can effectively deploy and manage OrientDB on Akash's decentralized cloud platform, leveraging its cost-effectiveness and scalability. 
\ No newline at end of file diff --git a/src/content/Docs/guides/databases/postgres-sql-restore-or-backup/index.md b/src/content/Docs/guides/databases/postgres-sql-restore-or-backup/index.md deleted file mode 100644 index efdb178d..00000000 --- a/src/content/Docs/guides/databases/postgres-sql-restore-or-backup/index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -categories: ["Guides"] -tags: ["Blockchain"] -weight: 1 -title: "PostgreSQL restore/backup" -linkTitle: "PostgreSQL restore/backup" ---- - -**Repository**: [ovrclk/akash-postgres-restore](https://github.com/ovrclk/akash-postgres-restore) - -An auto-restoring Postgres server running on Akash, with backups taken on a configurable schedule. Backups are stored on decentralised storage using Filebase. - -Ultimately this is a two container setup, one PostgreSQL server and one scheduler container to restore the database on boot, and run a cronjob to back it up. - -## Usage - -* Setup a [Filebase](https://filebase.com/) account and bucket (or any S3 compatible storage host). -* Set the environment variables in the [deploy.yml](https://github.com/ovrclk/akash-postgres-restore/blob/master/deploy.yml) and deploy on Akash -* Use the URL and port Akash gives you to connect to the Postgres server, with the credentials you provided in the environment variables. For example cluster.ewr1p0.mainnet.akashian.io:31234 - -### Using with an app container - -Alternatively add your own app container to the deploy.yml and expose the Postgres 5432 port to your application only for a local server. - -For example: - -``` -services: - app: - image: myappimage:v1 - depends_on: - - service: postgres - cron: - image: ghcr.io/ovrclk/akash-postgres-restore:v0.0.4 - env: - - POSTGRES_PASSWORD=password - ... 
- depends_on: - - service: postgres - postgres: - image: postgres:12.6 - env: - - POSTGRES_PASSWORD=password - expose: - - port: 5432 - to: - - service: app - - service: cron -``` - -### Environment variables - -* `POSTGRES_USER=postgres` - your Postgres server username -* `POSTGRES_PASSWORD=password` - your Postgres server password -* `POSTGRES_HOST=postgres` - postgres server host, whatever you named it in deploy.yml -* `POSTGRES_PORT=5432` - postgres port, will be 5432 unless you aliased it in deploy.yml -* `POSTGRES_DATABASE=akash_postgres` - name of your database -* `BACKUP_PATH=bucketname/path` - bucket and path for your deployments. Make sure directories exist first -* `BACKUP_KEY=key` - your Filebase access key -* `BACKUP_SECRET=secret` - your Filebase secret -* `BACKUP_PASSPHRASE=secret` - a passphrase to encrypt your backups with -* `BACKUP_HOST=https://s3.filebase.com` - the S3 backup host, this defaults to Filebase but can be any S3 compatible host -* `BACKUP_SCHEDULE=*/15 * * * *` - the cron schedule for backups. Defaults to every 15 minutes -* `BACKUP_RETAIN=7 days` - how many days to keep backups for - -## Development - -You can run the application locally using Docker compose. - -Copy the `.env.sample` file to `.env` and populate - -Run `docker-compose up` to build and run the application \ No newline at end of file diff --git a/src/content/Docs/guides/databases/postgresql/index.md b/src/content/Docs/guides/databases/postgresql/index.md new file mode 100644 index 00000000..b1a0d10a --- /dev/null +++ b/src/content/Docs/guides/databases/postgresql/index.md @@ -0,0 +1,130 @@ +--- +categories: ["Guides"] +tags: ["Database", "SQL"] +weight: 1 +title: "PostgreSQL" +linkTitle: "PostgreSQL" +--- +- **PostgreSQL**: PostgreSQL is a powerful, open-source, object-relational database system known for its robustness, feature set, and SQL compliance. 
It supports advanced data types, concurrency, and scalability, making it suitable for small to enterprise-level applications. + +- **Akash Network**: Akash is a decentralized cloud platform that enables users to deploy applications in a cost-effective, secure, and censorship-resistant manner. Using Akash, developers can deploy containerized applications such as PostgreSQL without relying on traditional centralized cloud providers. + +--- + +#### **Key Steps to Deploy PostgreSQL on Akash** + +1. **Set Up Akash CLI**: + - Install the Akash CLI by following the [official guide](https://docs.akash.network/). + - Configure your wallet and fund it with AKT tokens to pay for deployments. + +2. **Prepare the PostgreSQL Docker Image**: + - Choose a PostgreSQL Docker image, such as the official `postgres` image from Docker Hub. + - Ensure the image meets your configuration needs (version, extensions, etc.). + +3. **Define an SDL File for the Deployment**: + - The SDL (Service Definition Language) file defines the deployment's requirements, such as resource allocation, container configuration, and environment variables. + +--- + +#### **Sample SDL File for PostgreSQL Deployment** + +Below is an example SDL file to deploy PostgreSQL on Akash: + +```yaml +--- +version: "2.0" + +services: + postgres-db: + image: postgres:latest + env: + POSTGRES_USER: "your_username" + POSTGRES_PASSWORD: "your_password" + POSTGRES_DB: "your_database" + expose: + - port: 5432 + as: 5432 + to: + - global: true + +profiles: + compute: + postgres-db: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + size: 10Gi + + placement: + akash: + attributes: + region: us-west + pricing: + postgres-db: + denom: uakt + amount: 100 + +deployment: + postgres-db: + postgres-db: + profile: postgres-db + count: 1 +``` + +--- + +#### **Key Sections of the SDL File** + +1. **`services`**: + - Defines the container image to use (`postgres:latest`). 
+ - Configures environment variables (`POSTGRES_USER`, `POSTGRES_PASSWORD`, `POSTGRES_DB`) to initialize PostgreSQL. + - Exposes PostgreSQL on port `5432` and makes it accessible globally. + +2. **`profiles`**: + - **`compute`**: Specifies resource allocation (CPU, memory, and storage). + - **`placement`**: Determines the deployment's region and cost pricing for Akash. + +3. **`deployment`**: + - Maps the service (`postgres-db`) to the compute profile and specifies the number of container instances (`count: 1`). + +--- + +#### **Steps to Deploy** + +1. **Validate the SDL File**: + - Run the following command to ensure your SDL file is valid: + ```bash + akash deploy validate .yaml + ``` + +2. **Create the Deployment**: + - Deploy the PostgreSQL service using: + ```bash + akash tx deployment create .yaml --from + ``` + +3. **Bid on the Deployment**: + - After creating the deployment, Akash providers will bid on your job. + - Accept a bid to finalize the deployment. + +4. **Access the PostgreSQL Service**: + - Use the lease details to retrieve the service’s public endpoint. + - Connect to the PostgreSQL instance using a client or application: + ```bash + psql -h -U -d + ``` + +--- + +#### **Tips and Best Practices** + +- **Backup Your Data**: Use persistent storage or an external volume to ensure your PostgreSQL data is retained after container restarts. +- **Secure Connections**: Use tools like SSH tunnels or VPNs to secure database access. +- **Scaling**: Update the `count` value in the `deployment` section to increase the number of instances. + +--- + +This guide provides a straightforward process to deploy PostgreSQL on Akash using a sample SDL file. Tailor the configurations to meet your application's specific needs, and enjoy the cost-effective and decentralized benefits of Akash! 
\ No newline at end of file diff --git a/src/content/Docs/guides/deployments/apache-http/index.md b/src/content/Docs/guides/deployments/apache-http/index.md new file mode 100644 index 00000000..409b4b91 --- /dev/null +++ b/src/content/Docs/guides/deployments/apache-http/index.md @@ -0,0 +1,129 @@ +--- +categories: ["Guides"] +tags: ["Deployments"] +weight: 1 +title: "Deploy an Apache HTTP Server on Akash" +linkTitle: "Apache HTTP Server" +--- + +The [Apache HTTP Server Project](https://httpd.apache.org/) is an effort to develop and maintain an open-source HTTP server for modern operating systems including UNIX and Windows. The goal of this project is to provide a secure, efficient and extensible server that provides HTTP services in sync with the current HTTP standards. + +To deploy an Apache HTTP Server on Akash using Docker and Akash CLI, follow this step-by-step guide. We'll use the official Apache Docker image and deploy it with Akash. + +## Prerequisites + +1. **Install Akash CLI**: [Installation guide](docs/deployments/akash-cli/overview/) +2. **Set up an Akash Wallet**: [Wallet setup guide](docs/deployments/akash-cli/installation/#create-an-account) +3. **Fund your Wallet**: Ensure your wallet is funded with AKT to cover deployment costs. [Funding guide](docs/deployments/akash-cli/installation/#fund-your-account) +4. **Basic SDL Knowledge**: Understand how SDL files work for Akash deployments. +5. **Docker**: Installed locally to test the Apache container. + +## Step 1: Create an SDL File + +We’ll use the **Apache HTTP Server official Docker image** to create the SDL file. 
Here’s a sample `deploy.yaml`: + +``` +--- +version: "2.0" + +services: + apache: + image: httpd:latest # Official Docker image for Apache + expose: + - port: 80 + as: 80 + to: + - global: true + +profiles: + compute: + apache: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + placement: + global: + attributes: + region: us-west # Specify your preferred region + pricing: + apache: + denom: uakt + amount: 100 + +deployment: + apache: + global: + profile: apache + count: 1 +``` + +## Step 2a: Deploy Using Akash CLI (option) + +1. Initialize Deployment: + + - Save the `deploy.yaml` file in your working directory. + + - Run the following command to create the deployment: +``` + akash tx deployment create deploy.yaml --from <your-wallet> --node <node-address> --chain-id <chain-id> --fees <fee>uakt +``` +2. Bid Selection: + + - Monitor the bids for your deployment using: +``` +akash query market bid list --owner <your-address> +``` +3. Accept a bid with: +``` + akash tx market lease create --bid-id <bid-id> --from <your-wallet> --fees <fee>uakt +``` +4. Verify Lease: + + - Confirm the lease creation with: +``` + akash query market lease list --owner <your-address> +``` +5. Access Deployment: + + - Use the deployment’s external URI to access your Apache server. + +## Step 2b: Deploy Using Akash Console (alternative option) + +1. Log In: + - Open the `Akash Console`. + +2. Upload SDL: + - Use the "Deploy" section and upload the `deploy.yaml` file. + +3. Choose Configuration: + - Select preferred pricing and configuration options. + +4. Deploy: + - Click "Deploy" and confirm using your wallet. + +5. Monitor Deployment: + - Watch logs and deployment status directly from the console. + +6. Access Deployment: + - Copy the public endpoint provided after deployment is complete. + +## Step 3: Test Your Apache HTTP Server + +1. Access the Server: + - Open the external URI in your browser. You should see the default Apache HTTP Server page. + +2. 
Custom Configuration: + - Modify configurations by creating a Dockerfile to include your custom `httpd.conf` and updating the image in the SDL. + +## Useful Links + + - Apache Docker Hub: https://hub.docker.com/_/httpd + - Akash Documentation: https://docs.akash.network + - Akash Console: https://console.akash.network + - Akash CLI Deployment: https://docs.akash.network/guides/deploy + +This guide ensures you have a functional Apache HTTP server running on Akash Network using either the CLI or the console. \ No newline at end of file diff --git a/src/content/Docs/guides/deployments/caddy/index.md b/src/content/Docs/guides/deployments/caddy/index.md new file mode 100644 index 00000000..0e1ff497 --- /dev/null +++ b/src/content/Docs/guides/deployments/caddy/index.md @@ -0,0 +1,117 @@ +--- +categories: ["Guides"] +tags: ["Deployments"] +weight: 1 +title: "Setup, Deploy, and Launch Caddy on Akash Network" +linkTitle: "Caddy" +--- + +[Caddy](https://caddyserver.com/) is an extensible server platform that uses TLS by default. This guide walks you through setting up, deploying, and launching the Caddy web server on the Akash Network. We'll use the official Docker image for Caddy and leverage your provided SDL template to craft the deployment configuration. + +## Prerequisites + +1. **Install Akash CLI**: Follow the official guide to [set up the Akash CLI](docs/getting-started/quickstart-guides/akash-cli/). +2. **Create an Akash Wallet**: If you don’t already have an Akash wallet, use the [wallet creation guide](docs/getting-started/token-and-wallets/#keplr-wallet) to set one up. +3. **Fund Your Wallet**: Fund your Akash wallet with AKT tokens to pay for deployments. +4. **Install Docker**: Ensure Docker is installed and running on your local machine. +5. **Install a Code Editor**: Use an editor like VSCode for editing SDL files. 
+ +## Step 1: Prepare the SDL File (deploy.yaml) + +Below is an example SDL file (deploy.yaml) for deploying Caddy using its official Docker image: + +``` +--- +version: "2.0" + +services: + caddy: + image: caddy:latest + env: + - CADDY_HOST=:80 + expose: + - port: 80 + to: + - global: true + +profiles: + compute: + caddy: + resources: + cpu: + units: 0.25 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + akash: + attributes: + region: us-west + pricing: + caddy: + denom: uakt + amount: 500 + +deployment: + caddy: + akash: + profile: caddy + count: 1 +``` + +### Notes: + +- **image**: Uses the official Caddy Docker image. +- **port**: Caddy listens on port 80 globally. +- **resources**: Allocates minimal CPU, memory, and storage resources. +- **pricing**: Defines a base price for deployment. + +## Step 2: Deploy Using Akash CLI + +1. Validate the SDL File: +``` +akash deployment validate deploy.yaml +``` + +2. Create Deployment: +``` +akash tx deployment create deploy.yaml --from --chain-id --node --fees uakt +``` +3. Query Lease: After creating the deployment, query the lease to ensure it’s active: +``` + akash query market lease list --owner --node +``` +4. Access the Deployment: Note the endpoint provided in the lease logs. Use this URL to access your Caddy instance. + +For detailed steps on using the CLI, refer to the Akash CLI Deployment Guide. + +## Step 3: Deploy Using Akash Console + +1. Access the Akash Console: Go to https://console.akash.network. + +2. Login: Connect your Akash wallet to the console. + +3. Create Deployment: + - Upload the deploy.yaml file. + - Follow the prompts to set pricing and finalize the deployment. + +4. Monitor Deployment: Use the console to monitor logs and obtain the deployment’s public endpoint. + +For more details, follow the [Akash Console Guide](http://localhost:4321/docs/deployments/akash-console/). + +## Step 4: Verify and Launch Caddy + +1. 
**Test the Endpoint**: Visit the public endpoint URL from your lease logs or the Akash console. You should see Caddy's default web page. + +2. **Customize Caddy**: + - Create a Caddyfile for custom configurations. + - Update the Docker image to mount your Caddyfile. + +3. **Redeploy if Necessary**: Update your SDL file and redeploy for any configuration changes. + +## Troubleshooting + +- **Common Deployment Issues**: https://docs.akash.network/guides/deploy/troubleshooting +- **Akash Discord Support**: Join [Akash Discord](https://discord.gg/akash) for community help. \ No newline at end of file diff --git a/src/content/Docs/guides/deployments/nginx/index.md b/src/content/Docs/guides/deployments/nginx/index.md new file mode 100644 index 00000000..e4a80613 --- /dev/null +++ b/src/content/Docs/guides/deployments/nginx/index.md @@ -0,0 +1,118 @@ +--- +categories: ["Guides"] +tags: ["Deployments"] +weight: 1 +title: "Setup, Deploy, and Launch an Nginx on Akash Network" +linkTitle: "Nginx" +--- + +Here's a step-by-step guide on how to set up, deploy, and launch an Nginx server to Akash, using the official Docker image, the Akash CLI, or the console. + +## Step 1: Install Akash CLI + +Follow the Akash CLI installation guide to set up the CLI tool for managing deployments. Ensure you have: + +- Akash wallet created and funded. +- Your node and CLI properly configured. + +## Step 2: Create the SDL File + +Below is a sample `deploy.yaml` file + +``` +--- +version: "2.0" + +services: + nginx: + image: nginx:latest # Official Nginx Docker image + expose: + - port: 80 + as: 80 + to: + - global: true + +profiles: + compute: + nginx: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + placement: + default: + attributes: + region: us-west + pricing: + nginx: + denom: uakt + amount: 50 + +deployment: + nginx: + nginx: + profile: nginx + count: 1 +``` + +- **Image**: `nginx:latest` pulls the latest official Docker image for Nginx. 
+- **Expose**: Port 80 is exposed globally to make your server accessible on the web. +- **Resources**: Defines the compute resources (CPU, memory, and storage) for the container. +- **Pricing**: Sets a price in Akash tokens (`uakt`) for your deployment. + +## Step 3: Deploy to Akash + +### Step 3a: Deploy Using Akash CLI (option a) + +1. **Authenticate and prepare your environment**: + +``` +akash tx authz grant --from +``` +Replace `` and `` with your provider's address and your wallet name. + +2. **Submit the SDL file for deployment**: +``` +akash tx deployment create deploy.yaml --from +``` +3. **Bid on a provider: Run this command to find available bids**: +``` +akash query market bid list --owner +``` +4. **Accept a bid and deploy**: +``` +akash tx market lease create --bid-id --from +``` +5. **Check deployment status**: +``` + akash query market lease status --owner --dseq +``` +6. **Access your Nginx server**: Use the provided endpoint to access your running Nginx server. + +For a full CLI guide, visit: [Akash CLI Deployment](docs/deployments/akash-cli/overview/). + +### Step 3b: Deploy Using Akash Console + +1. Navigate to the [Akash Console](https://console.akash.network/). +2. Login using your Akash wallet. +3. Upload the `deploy.yaml` file. +4. Submit your deployment request. +5. Choose a provider and approve the bid. +6. Monitor your deployment and retrieve the endpoint URL once the deployment is live. + +For detailed instructions, check the [Akash Console Guide](docs/deployments/akash-console/). + +## Step 4: Verify Your Nginx Server + +Once your deployment is live: + + 1. Visit the provided endpoint URL. + 2. You should see the default Nginx welcome page. + +## Additional Notes + +- If you need to customize the Nginx configuration, create a custom nginx.conf file and mount it in the Docker container. Update the services block in the SDL file to include a volume mapping. 
+- For persistent storage or logging, configure additional storage resources in the SDL file. \ No newline at end of file diff --git a/src/content/Docs/guides/deployments/ruby-on-rails-with-sia-and-auth0/index.md b/src/content/Docs/guides/deployments/ruby-on-rails-with-sia-and-auth0/index.md deleted file mode 100644 index f00e2c5e..00000000 --- a/src/content/Docs/guides/deployments/ruby-on-rails-with-sia-and-auth0/index.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -categories: ["Deployments"] -tags: ["Blockchain"] -weight: 1 -title: "Ruby on Rails with Sia and Auth0" -linkTitle: "Ruby on Rails with Sia and Auth0" ---- - - - -**Repository**: [ovrclk/akash-on-rails](https://github.com/ovrclk/akash-on-rails) - - -This is an example Rails Pinterest clone hosted on Akash. There are a few extra features to make the most of decentralised hosting: - -* Database backup/restore to [Sia](https://sia.tech/) via [Filebase](https://filebase.com/). -* User image uploads to [Sia](https://sia.tech/) via [Filebase](https://filebase.com/). -* [Auth0](https://auth0.com/) user authentication. -* [Cloudflare](https://www.cloudflare.com/) DNS and SSL. -* Scheduled tasks using [Whenever](https://github.com/javan/whenever). - -## Architecture - -### App container - -* Runs the rails server and hosts the actual website. -* Connects to the Postgres container for a persistent database. -* Hosts files on [Filebase](https://filebase.com/), ([Sia](https://sia.tech/) and [Storj](https://www.storj.io/) hosting currently). -* Uses [Auth0](https://auth0.com/) for user login and registration. - -### Cron container - -* Auto-restores the Postgres database on boot, achieving persistent database through re-deploys. -* Auto-backup of the database to [Filebase](https://filebase.com/) every 15 minutes. -* Crontab is defined using [Whenever](https://github.com/javan/whenever) in [`schedule.rb`](https://github.com/ovrclk/akash-on-rails/blob/master/config/schedule.rb). 
-* Runs the same docker image as the rails application, but running `cron` instead of the rails server. -* A [standalone database backup/restore container](https://github.com/ovrclk/akash-postgres-restore) is also available. - -### Postgres container - -* Runs a standard Postgres server docker image. - -## Usage - -Ultimately this repository is designed to provide a sensible example of hosting a rails application on Akash. There are a few ways to use it: - -### Run the application as-is on Akash with your own storage and [Auth0](https://auth0.com/) account - -* Setup a free [Cloudflare](https://www.cloudflare.com/) account and add your domain and set nameservers. -* Setup a [Filebase](https://filebase.com/) account and bucket. - * Add a `backups` folder to your bucket. - * You will need your bucket name, client ID, and secret. -* Sign up for an [Auth0](https://auth0.com/) account and set up an App. - * Callback URL: `https://{yourdomain}/auth/auth0/callback`. - * Logout URL: `https://{yourdomain}`. - * You will need your [Auth0](https://auth0.com/) domain, client ID, and secret. -* Using the example deploy.yml, populate the environment variables with the values from [Filebase](https://filebase.com/) and [Auth0](https://auth0.com/). -* Deploy on Akash and get your app URL. -* Point your domain to your app URL using a CNAME in [Cloudflare](https://www.cloudflare.com/). -* Configure 'Full' SSL mode in [Cloudflare](https://www.cloudflare.com/). -* Sign in to your website using [Auth0](https://auth0.com/). The first user created will be made an administrator. - -### Use the relevant files in your own project - -* [Dockerfile](https://github.com/ovrclk/akash-on-rails/blob/master/Dockerfile) - * Rails ready Dockerfile. - * Installs the AWS CLI tool to interact with [Filebase](https://filebase.com/). -* [scripts/run-app.sh](https://github.com/ovrclk/akash-on-rails/blob/master/scripts/run-app.sh) - * Precompiles rails assets. - * Runs the rails server. 
-* [scripts/run-scheduler.sh](https://github.com/ovrclk/akash-on-rails/blob/master/scripts/run-scheduler.sh) - * Creates and restores the database. - * Runs rake db:migrate and db:seed. - * Sets the crontab using [Whenever](https://github.com/javan/whenever) and runs the cron service. -* [scripts/restore-postgres.sh](https://github.com/ovrclk/akash-on-rails/blob/master/scripts/restore-postgres.sh) - * Downloads latest backup from [Filebase](https://filebase.com/). - * Restore the DB if a backup was found. -* [scripts/backup-postgres.sh](https://github.com/ovrclk/akash-on-rails/blob/master/scripts/backup-postgres.sh) - * Backs up the database to [Filebase](https://filebase.com/). - * Deletes backups older than KEEP\_BACKUPS. -* [config/schedule.rb](https://github.com/ovrclk/akash-on-rails/blob/master/config/schedule.rb) - * [Whenever](https://github.com/javan/whenever) cron schedule file to run scripts/backup-postgres.sh every 15 minutes. -* [config/initializers/shrine.rb](https://github.com/ovrclk/akash-on-rails/blob/master/config/initializers/shrine.rb) - * Configures Shrine within the application to use [Filebase](https://filebase.com/) as an S3 host. -* [deploy.yml](https://github.com/ovrclk/akash-on-rails/blob/master/deploy.yml) - * Akash deploy manifest. - -### Clone the repository and use it as a base for a new project - -* Clone the repository to your own Github account. -* Rename any occurrence of AkashOnRails, akash-on-rails and, akash\_on\_rails to your own app name. -* Change any app/models, app/controllers, app/views as required. - -## Development - -You can run the application locally using Docker compose. - -Copy the `.env.sample` file to `.env` and populate. - -Run `docker-compose up` to build and run the application. 
\ No newline at end of file diff --git a/src/content/Docs/guides/frameworks/django/index.md b/src/content/Docs/guides/frameworks/django/index.md index f8a3fbec..d60fbeb8 100644 --- a/src/content/Docs/guides/frameworks/django/index.md +++ b/src/content/Docs/guides/frameworks/django/index.md @@ -2,7 +2,7 @@ categories: ["Guides"] tags: ["Frameworks"] weight: 1 -title: "HDeploying a Django App on Akash" +title: "Deploying a Django App on Akash" linkTitle: "Django" --- diff --git a/src/content/Docs/guides/frameworks/flask/index.md b/src/content/Docs/guides/frameworks/flask/index.md new file mode 100644 index 00000000..5efd7e08 --- /dev/null +++ b/src/content/Docs/guides/frameworks/flask/index.md @@ -0,0 +1,149 @@ +--- +categories: ["Guides"] +tags: ["Frameworks", "Python", "Flask"] +weight: 1 +title: "Deploying a Flask App on Akash" +linkTitle: "Flask" +--- + +Here’s a step-by-step guide on how to create and deploy a Flask application on the Akash decentralized cloud using the sample SDL template. This guide assumes you have basic knowledge of Flask and are familiar with Docker and the Akash ecosystem. + +## Prerequisites + +1. **Install Akash CLI**: Ensure the Akash CLI (akash) is installed and configured on your system. +2. **Set Up Wallet**: Create an Akash wallet and fund it with AKT tokens. +4. **Docker Installed**: Have Docker installed for containerizing your Flask app. +5. **Basic Flask App**: Have a working Flask application. + +## Step 1: Prepare Your Flask App + +1. **Create a Flask App Structure**: Your Flask app should look something like this: + +``` +my-flask-app/ +├── app/ +│ ├── __init__.py +│ ├── routes.py +├── Dockerfile +├── requirements.txt +├── config.py +└── wsgi.py +``` +2. **Create the Dockerfile**: Create a Dockerfile to containerize your application. Example: + +``` +FROM python:3.9-slim + +WORKDIR /app + +COPY requirements.txt requirements.txt +RUN pip install -r requirements.txt + +COPY . . 
+ +CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:5000", "wsgi:app"] +``` + +3. **Install Dependencies Create a `requirements.txt` file with the necessary Flask dependencies: + +``` +Flask==2.3.2 +gunicorn==21.2.0 +``` + +## Step 2: Containerize the Application + +1. **Build the Docker Image** In the project root directory, run: +``` +docker build -t my-flask-app . +``` +2. **Test Locally** Run the container to ensure it works: +``` +docker run -p 5000:5000 my-flask-app +``` + +## Step 3: Prepare the SDL File + +Below is a sample SDL template tailored for a Flask app: + +``` +version: "2.0" + +services: + flask-service: + image: your-dockerhub-username/my-flask-app:latest + expose: + - port: 5000 + as: 80 + to: + - global: true + +profiles: + compute: + flask-compute: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + placement: + akash: + attributes: + host: akash + signedBy: + anyOf: + - "akash1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + pricing: + flask-compute: + denom: uakt + amount: 100 + +deployment: + flask-deployment: + flask-service: + profile: flask-compute + count: 1 +``` + +## Step 4: Push Your Docker Image + +1. Tag Your Image + +``` +docker tag my-flask-app your-dockerhub-username/my-flask-app:latest +``` + +2. Push to DockerHub +``` +docker push your-dockerhub-username/my-flask-app:latest +``` + +## Step 5: Deploy on Akash + +1. **Create Deployment** Use the Akash CLI to create the deployment: +``` +akash tx deployment create deploy.yaml --from --node --chain-id --fees 5000uakt +``` +2. **Approve Lease** After creating the deployment, view bids: +``` +akash query market bid list --owner +``` +Select a provider and approve the lease: +``` +akash tx market lease create --dseq --from --provider --node --chain-id --fees 5000uakt +``` +3. **Access Your App** Once the lease is approved, Akash will provide an external URL or IP for accessing the deployed Flask app. + +## Step 6: Verify Deployment +1. 
**Check Logs** To debug any issues: +``` +akash logs --dseq --gseq --oseq +``` +2. **Test Application** Visit the external URL or IP in a browser to ensure the Flask app is running. + +## Optional Enhancements + +- **Enable HTTPS**: Use a reverse proxy like NGINX with SSL certificates. +- **Scaling**: Adjust the count in the deployment to scale the number of instances. diff --git a/src/content/Docs/guides/frameworks/gatsby/index.md b/src/content/Docs/guides/frameworks/gatsby/index.md new file mode 100644 index 00000000..5ca8ad42 --- /dev/null +++ b/src/content/Docs/guides/frameworks/gatsby/index.md @@ -0,0 +1,121 @@ +--- +categories: ["Guides"] +tags: ["Frameworks"] +weight: 1 +title: "How to Build and Deploy a Gatsby App on Akash" +linkTitle: "Gatsby" +--- + +This guide walks you through the process of building a Gatsby app and deploying it to the Akash Network. We'll cover both using the Akash Console and the Akash CLI. + +## 1. Build Your Gatsby App + +1. Install Gatsby CLI: + +``` +npm install -g gatsby-cli +``` +2. Create a Gatsby Project: +``` +gatsby new my-gatsby-app https://github.com/gatsbyjs/gatsby-starter-default +cd my-gatsby-app +``` +3. Build for Production: +``` +gatsby build +``` +This creates a `public/` directory containing the static files for deployment. + +## Prepare Your Akash Deployment + +**A. Sample `deploy.yaml`** + +Use the following SDL file to configure your Akash deployment. Update placeholders like `YOUR_IMAGE` and `YOUR_DOMAIN` accordingly. 
 + +``` +--- +version: "2.0" + +services: + gatsby: + image: nginx:latest + env: + - NGINX_PORT=80 + expose: + - port: 80 + as: 80 + to: + - global: true + volumes: + - gatsby-data:/usr/share/nginx/html + +profiles: + compute: + gatsby: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + default: + attributes: + region: us-west + signedBy: + anyOf: + - "akash1YOURPROVIDERADDRESS" + pricing: + gatsby: + denom: uakt + amount: 100 + +deployment: + gatsby: + default: + profile: gatsby + count: 1 +``` + +**B. Upload Static Files** + +Before deployment, host the public/ files on your image. For example: + + Use Docker to create an image: + ``` + docker build -t YOUR_IMAGE . + docker push YOUR_IMAGE + ``` +## 3. Deploy Using Akash + +### Option A: Akash Console + +1. Go to the [Akash Console](https://console.akash.network/). +2. Log in with your Keplr wallet. +3. Create a deployment: + + - Upload the deploy.yaml file. + - Specify the price you’re willing to pay. + +4. Approve the lease once a provider accepts your deployment. +5. Use the provider endpoint to access your app. + +### Option B: Akash CLI + +1. Install the Akash CLI by following the [CLI Installation Guide](docs/getting-started/quickstart-guides/akash-cli/). +2. Fund your wallet to pay for deployment fees. +3. Deploy your app: +``` +akash tx deployment create deploy.yaml --from YOUR_WALLET --chain-id akashnet-2 +``` +4. Monitor the status: +``` +akash query deployment list --owner YOUR_WALLET +``` +5. Once the lease is active, access your app via the provider’s endpoint. + +## 4. Test Your Deployment + +Visit the endpoint provided by the Akash provider to ensure your Gatsby app is live and functional. 
\ No newline at end of file diff --git a/src/content/Docs/guides/frameworks/nestjs/index.md b/src/content/Docs/guides/frameworks/nestjs/index.md new file mode 100644 index 00000000..c4ce0e6c --- /dev/null +++ b/src/content/Docs/guides/frameworks/nestjs/index.md @@ -0,0 +1,149 @@ +--- +categories: ["Guides"] +tags: ["Web Development", "NestJS", "JavaScript", "Framework"] +weight: 1 +title: "Building and Deploying a NestJS App on Akash Network" +linkTitle: "NestJS" +--- + +This guide walks you through setting up, containerizing, and deploying a NestJS application on Akash Network. + +## 1. Build a NestJS App + +Follow these steps to create and prepare your NestJS app: + +### Step 1.1: Create a New NestJS App + +1. Ensure Node.js and npm are installed on your system. +2. Run the following commands: +``` + npm i -g @nestjs/cli + nest new my-nestjs-app +``` +3. Navigate into the project: +``` + cd my-nestjs-app +``` +### Step 1.2: Configure Your Application +- Install any additional packages your app needs (e.g., `npm install --save `). +- Update the application logic in the appropriate directories (e.g., `src/`). + +## 2. Containerize the App + +To deploy the app on Akash, you need to containerize it using `Docker`. + +### Step 2.1: Create a `Dockerfile` + +In the root of your NestJS app, create a Dockerfile: +``` +# Base image +FROM node:18-alpine + +# Set working directory +WORKDIR /app + +# Copy package files and install dependencies +COPY package*.json ./ +RUN npm install + +# Copy the rest of the app +COPY . . + +# Build the app +RUN npm run build + +# Expose port 3000 +EXPOSE 3000 + +# Command to start the app +CMD ["npm", "run", "start:prod"] +``` +### Step 2.2: Build and Test the Image + +1. Build the Docker image: +``` +docker build -t my-nestjs-app . +``` +2. Run the image locally to test: +``` +docker run -p 3000:3000 my-nestjs-app +``` +3. Visit `http://localhost:3000` to confirm your app works. + +## 3. 
Deploy to Akash +Akash supports deployments via its CLI or Web Console. Below, you'll find instructions for both, along with a sample `deploy.yaml` file. + +### Step 3.1: Prepare the Deployment File + +Akash deployments require an SDL file (deploy.yaml). Below is a sample base template: + +``` +--- +version: "2.0" + +services: + web: + image: my-nestjs-app + expose: + - port: 3000 + as: 80 + to: + - global: true + +profiles: + compute: + web: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - "akash1xxxxxx" + pricing: + web: + denom: uakt + amount: 1000 + +deployment: + web: + westcoast: + profile: web + count: 1 +``` + +Replace `my-nestjs-app` with the name of your image in a public Docker registry (e.g., Docker Hub). + +### Step 3.2: Deploy Using Akash CLI + +1. Install the Akash CLI by following the Akash CLI Installation Guide, then authenticate your wallet by following the Wallet Authentication guide. +2. Deploy `deploy.yaml` using the commands below: +``` +akash tx deployment create deploy.yaml --from --node +``` +Detailed CLI deployment instructions are available [here](docs/getting-started/quickstart-guides/akash-cli/). + + +### Step 3.3: Deploy Using Akash Web Console + +If you prefer a GUI, use the Akash Console. + +1. Access the console at console.akash.network. +2. Login with your wallet. +3. Follow the Console Deployment Guide to upload your `deploy.yaml` file to complete the process. + +## 4. Monitor and Access Your App + +After deployment: + + 1. Use the Akash CLI or Console to monitor the status of your deployment. + 2. Retrieve the app's external URL or IP from the deployment details. + 3. Access your app via a browser or API client. 
diff --git a/src/content/Docs/guides/frameworks/nextjs/index.md b/src/content/Docs/guides/frameworks/nextjs/index.md index 108ef91c..2e550235 100644 --- a/src/content/Docs/guides/frameworks/nextjs/index.md +++ b/src/content/Docs/guides/frameworks/nextjs/index.md @@ -1,6 +1,6 @@ --- categories: ["Guides"] -tags: ["Web Development", "React", "JavaScript", "Framework"] +tags: ["Web Development", "Next.js", "JavaScript", "Framework"] weight: 1 title: "Next.js" linkTitle: "Next.js" diff --git a/src/content/Docs/guides/frameworks/nuejs/index.md b/src/content/Docs/guides/frameworks/nuejs/index.md new file mode 100644 index 00000000..ac66cd7b --- /dev/null +++ b/src/content/Docs/guides/frameworks/nuejs/index.md @@ -0,0 +1,138 @@ +--- +categories: ["Guides"] +tags: ["Frameworks"] +weight: 1 +title: "Building and Deploying an Nue JS App on Akash" +linkTitle: "Nue JS" +--- + +This guide assumes you have basic knowledge of Nue and are familiar with Docker and the Akash ecosystem. + +## 1. Building a Vue.js Application + +1. Set up your Vue.js environment: Install Vue CLI if it’s not already installed: + +``` +npm install -g @vue/cli +``` +2. Create a new Vue.js project: + +``` +vue create my-vue-app +``` +Follow the prompts to configure your project. + +3. Build your application for production: + +``` +cd my-vue-app +npm run build +``` +The production-ready files will be located in the `dist` folder. + +## 2. Packaging for Deployment + +1. **Packaging for Deployment**: + + - Create a Dockerfile in your project root: + + ``` + FROM node:16-alpine + + WORKDIR /app + + COPY ./dist /app + + RUN npm install -g serve + + CMD ["serve", "-s", "."] + ``` + + - Build the Docker image: + ``` + docker build -t my-vue-app . + ``` + - Push the image to a container registry like Docker Hub or GHCR: + ``` + docker tag my-vue-app /my-vue-app:latest + docker push /my-vue-app:latest + ``` + +2. 
**Set up an SDL file**: Use your SDL template to create deploy.yaml: + +``` +--- +version: "2.0" + +services: + web: + image: /my-vue-app:latest + expose: + - port: 80 + as: 80 + to: + - global +profiles: + compute: + web: + resources: + cpu: + units: 0.1 + memory: + size: 128Mi + storage: + size: 512Mi + placement: + devnet: + pricing: + web: + denom: uakt + amount: 100 +deployment: + devnet: + web: + profile: web + count: 1 +``` + +## 3. Deploying the Application + +### Option 1: Using Akash CLI +1. **Set up Akash CLI**: Follow the [Akash CLI](http://localhost:4321/docs/getting-started/quickstart-guides/akash-cli/) setup guide. + +2. **Create and deploy your app**: + + - Fund your wallet: Follow the guide to fund your wallet. + - Deploy the SDL: + ``` + akash tx deployment create deploy.yaml --from --node https://rpc.akash.network:443 --chain-id akashnet-2 + ``` + - Monitor deployment logs and get the lease ID: + ``` + akash query deployment list --owner --node https://rpc.akash.network:443 + ``` + +### Option 2: Using the Akash Console + +1. **Log in to the Akash Console**: Open the [Akash Console](https://console.akash.network/). + +2. Create your deployment: + - Upload the `deploy.yaml` file in the deployment wizard. + - Review the generated manifest and submit the deployment. + +3. Select a provider: Choose a provider, bid on resources, and wait for the deployment to become active. + +## 4. Verifying Your Deployment + +Once your deployment is live: + 1. Access your app through the IP and port provided by the Akash Network. + 2. Configure your domain (if required) to point to the IP using an A record. 
+ +## Useful Links + + - Akash CLI setup: https://docs.akash.network/cli/install + - Funding wallet: https://docs.akash.network/guides/funding-wallet + - Akash Console: https://console.akash.network/ + + + diff --git a/src/content/Docs/guides/frameworks/vuejs/index.md b/src/content/Docs/guides/frameworks/vuejs/index.md new file mode 100644 index 00000000..9fd3ac8a --- /dev/null +++ b/src/content/Docs/guides/frameworks/vuejs/index.md @@ -0,0 +1,133 @@ +--- +categories: ["Guides"] +tags: ["Web Development", "Vue.js", "JavaScript", "Framework"] +weight: 1 +title: "Guide to Building and Deploying a Vue.js App on Akash" +linkTitle: "Vue.js" +--- + +This guide will walk you through building a Vue.js application, containerizing it, and deploying it to the Akash Network using the Akash CLI or the Akash Console. + +## Step 1: Create and Build a Vue.js App + +1. Create a Vue.js App: + +``` +npm init vue@latest vue-app +cd vue-app +npm install +``` + +2. Build the App for Production: + +``` +npm run build +``` +This will generate a `dist` folder containing the production-ready app. + +## Step 2: Containerize the Vue.js App + +1. Create a Dockerfile: In the root of your project, create a Dockerfile: +``` +FROM nginx:alpine +COPY dist/ /usr/share/nginx/html +EXPOSE 80 +CMD ["nginx", "-g", "daemon off;"] +``` +2. Build the Docker Image: + +``` +docker build -t vue-app:latest . +``` +3. Test the Image Locally (Optional): +``` +docker run -d -p 8080:80 vue-app:latest +``` + +Visit `http://localhost:8080` to confirm the app is running. + +4. Push the Image to a Container Registry (e.g., Docker Hub): + +``` +docker tag vue-app:latest /vue-app:latest +docker push /vue-app:latest +``` + +## Step 3: Prepare the SDL File for Deployment + +The following is a sample `deploy.yaml` file. Update the fields as needed, such as `image`, `price`, and `resources`. 
+ +``` +version: "2.0" + +services: + vue-app: + image: /vue-app:latest + expose: + - port: 80 + as: 80 + to: + - global: true + +profiles: + compute: + vue-app: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + placement: + vue-app: + pricing: + vue-app: + denom: uakt + amount: 100 + +deployment: + vue-app: + vue-app: + profile: vue-app + count: 1 +``` +Save this file as `deploy.yaml`. + +## Step 4: Deploy on Akash + +Option 1: Using Akash CLI + +1. Install Akash CLI: Follow the guide [here](docs/getting-started/quickstart-guides/akash-cli/) to set up the CLI. + +2. Fund Your Account: Fund your Akash wallet to cover deployment costs. Instructions can be found [here](http://localhost:4321/docs/getting-started/token-and-wallets/). + +3. Deploy: + - Create a deployment: + + ``` + akash tx deployment create deploy.yaml --from --chain-id + + ``` + - Review bids and accept: + ``` + akash query market lease list --owner + akash tx market lease create --from --chain-id --provider --dseq --gseq 1 --oseq 1 + ``` +4. Monitor Deployment: Use akash logs to verify the deployment: +``` +akash provider lease-logs --from --provider --dseq +``` + +## Option 2: Using Akash Console + +1. **Access the Console**: Visit the [Akash Console](https://console.akash.network/). + +2. **Log In**: Connect your wallet to the console. + +3. **Upload SDL**: Upload the `deploy.yaml` file and follow the on-screen steps to deploy. + +4. **Monitor and Manage**: Use the console interface to monitor and manage your deployment. + +## Step 5: Access Your Deployed App +Once the deployment is complete, Akash will provide a public URL or IP for accessing your app. Open it in your browser to confirm your Vue.js app is live. 
\ No newline at end of file diff --git a/src/content/Docs/guides/games/minecraft/index.md b/src/content/Docs/guides/games/minecraft/index.md new file mode 100644 index 00000000..2481b5dd --- /dev/null +++ b/src/content/Docs/guides/games/minecraft/index.md @@ -0,0 +1,138 @@ +--- +categories: ["Guides"] +tags: ["Games"] +weight: 1 +title: " Deploying a Minecraft Server on Akash" +linkTitle: "Minecraft" +--- + + +Akash Network is a decentralized cloud computing platform that offers cost-effective and flexible solutions for deploying various applications, including game servers. Deploying a Minecraft server on Akash lets you enjoy the benefits of decentralization, reduced hosting costs, and high availability. + +In this guide, we will walk you through deploying a Minecraft server on Akash using an example SDL (Service Definition Language) template. We'll also explain key configurations to customize the Minecraft experience. + +--- + +## **Why Use Akash for a Minecraft Server?** + +1. **Decentralization:** Host your server on a blockchain-powered cloud to reduce reliance on traditional hosting providers. +2. **Cost-Effective:** Akash's decentralized nature allows users to leverage competitive pricing from providers. +3. **Flexibility:** The Akash platform is compatible with a wide range of applications, including custom game servers. +4. **Performance:** Akash supports hosting on high-performance compute resources, ensuring low latency and reliability for gaming. + +--- + +## **Prerequisites** + +Before deploying your Minecraft server, ensure you have the following: + +1. **Akash Wallet:** To interact with the Akash blockchain. +2. **AKT Tokens:** Used for bidding and payment for compute resources. +3. **Akash CLI or Console Access:** To deploy and manage workloads. +4. **Basic SDL Template:** A file that defines the deployment configuration. +5. 
**Dockerized Minecraft Server Image:** For example, [itzg's Minecraft Server Docker image](https://hub.docker.com/r/itzg/minecraft-server). + +--- + +## **Steps to Deploy Minecraft Server on Akash** + +### **1. Prepare the SDL File** +The SDL (Service Definition Language) file specifies the deployment details such as compute resources, ports, and Docker image. Below is a sample SDL template for deploying a Minecraft server: + +```yaml +version: "2.0" +services: + minecraft: + image: itzg/minecraft-server:latest + env: + EULA: "TRUE" # Agree to the Minecraft EULA + VERSION: "1.20.1" # Specify the Minecraft server version + MEMORY: "2G" # Allocate memory to the server + expose: + - port: 25565 + as: 25565 + to: + - global: true +profiles: + compute: + minecraft: + resources: + cpu: + units: 1 # 1 CPU core + memory: + size: 2Gi # 2GB RAM + storage: + size: 10Gi # 10GB storage + placement: + westcoast: + attributes: + region: us-west + pricing: + minecraft: # Define pricing for the service + denom: uakt + amount: 100 # Set a bid price +deployment: + minecraft: + westcoast: + profile: minecraft + count: 1 # Deploy a single instance +``` + +### **2. Customize the SDL** +Modify the SDL to fit your requirements: +- **Version:** Adjust `VERSION` to the desired Minecraft server version. +- **Memory:** Increase or decrease the `MEMORY` allocation based on expected player load. +- **Storage:** Ensure enough storage is available for world data and plugins. +- **Pricing:** Set a competitive bid price under `amount` to secure compute resources. + +--- + +### **3. Deploy the SDL** +Use the Akash CLI to deploy your server. + +1. **Create the Deployment:** + ```bash + akash tx deployment create --from --chain-id + ``` + +2. **Bid for Resources:** + Wait for providers to respond with bids. Accept a bid using the CLI: + ```bash + akash tx market lease create --dseq --from --chain-id + ``` + +3. 
**Monitor the Deployment:** + Use the following command to check the status of your deployment: + ```bash + akash query market lease list --owner + ``` + +--- + +### **4. Connect to the Minecraft Server** +Once the deployment is active: +- Obtain the external IP address of your Akash deployment. +- Use the Minecraft client to connect to the server by entering the IP address and port `25565`. + +--- + +### **5. Manage and Update the Server** +To manage or update your Minecraft server: +- Modify the SDL file with new configurations. +- Redeploy the updated SDL using the Akash CLI. +- Use the Akash logs to monitor server activity: + ```bash + akash logs --dseq --from + ``` + +--- + +## **Tips for Customization** +- **Add Plugins:** Mount a volume for persistent storage and include plugin files in the Docker container. +- **Backup Worlds:** Schedule backups by adding scripts to the Minecraft container or utilizing Akash's storage options. +- **Enable Mods:** Use a Forge or Fabric Minecraft server image for modding support. + +--- + +## **Conclusion** +With Akash, you can deploy a Minecraft server that benefits from decentralized infrastructure and cost efficiency. By following this guide and customizing the provided SDL template, you can create a Minecraft server tailored to your gaming needs. Enjoy hosting your Minecraft world in a decentralized, scalable environment! \ No newline at end of file diff --git a/src/content/Docs/guides/hosting/caddy/index.md b/src/content/Docs/guides/hosting/caddy/index.md new file mode 100644 index 00000000..dcb65dd4 --- /dev/null +++ b/src/content/Docs/guides/hosting/caddy/index.md @@ -0,0 +1,89 @@ +--- +categories: ["Guides"] +tags: ["Deployment"] +weight: 1 +title: "Setup, Deploy, and Launch Caddy on Akash Network" +linkTitle: "Caddy" +--- + +[Caddy](https://caddyserver.com/) is an extensible server platform that uses TLS by default. 
This guide walks you through setting up, deploying, and launching the Caddy web server on the Akash Network. We'll use the official Docker image for Caddy and leverage your provided SDL template to craft the deployment configuration. + +## Prerequisites + +1. **Install Akash CLI**: Follow the official guide to [set up the Akash CLI](https://akash.network/docs/getting-started/quickstart-guides/akash-cli/). +2. **Create an Akash Wallet**: If you don’t already have an Akash wallet, use the [wallet creation guide](https://akash.network/docs/getting-started/token-and-wallets/#keplr-wallet) to set one up. +3. **Fund Your Wallet**: Fund your Akash wallet with AKT tokens to pay for deployments. +4. **Install Docker**: Ensure Docker is installed and running on your local machine. +5. **Install a Code Editor**: Use an editor like VSCode for editing SDL files. + +## Step 1: Prepare the SDL File (deploy.yaml) + +Below is an example SDL file (deploy.yaml) for deploying Caddy using its official Docker image: + +``` +--- +version: "2.0" + +services: + caddy: + image: caddy:latest + env: + - CADDY_HOST=:80 + expose: + - port: 80 + to: + - global: true + +profiles: + compute: + caddy: + resources: + cpu: + units: 0.25 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + akash: + attributes: + region: us-west + pricing: + caddy: + denom: uakt + amount: 500 + +deployment: + caddy: + akash: + profile: caddy + count: 1 +``` + +### Notes: + +- **image**: Uses the official Caddy Docker image. +- **port**: Caddy listens on port 80 globally. +- **resources**: Allocates minimal CPU, memory, and storage resources. +- **pricing**: Defines a base price for deployment. + +## Step 2: Deploy Using Akash CLI + +1. Validate the SDL File: +``` +akash deployment validate deploy.yaml +``` + +2. Create Deployment: +``` +akash tx deployment create deploy.yaml --from --chain-id --node --fees uakt +``` +3. 
Query Lease: After creating the deployment, query the lease to ensure it’s active: +``` + akash query market lease list --owner --node +``` +4. Access the Deployment: Note the endpoint provided in the lease logs. Use this URL to access your Caddy instance. + +For detailed steps on using the CLI, refer to the Akash CLI Deployment Guide. + diff --git a/src/content/Docs/guides/hosting/cal_com/index.md b/src/content/Docs/guides/hosting/cal_com/index.md new file mode 100644 index 00000000..ecfff988 --- /dev/null +++ b/src/content/Docs/guides/hosting/cal_com/index.md @@ -0,0 +1,170 @@ +--- +categories: ["Guides"] +tags: ["Deployment"] +weight: 1 +title: " Guide to Deploying Cal.com on Akash Network" +linkTitle: "Cal.com" +--- + + +This guide walks you through the process of deploying **Cal.com**, an open-source scheduling platform, on **Akash**, a decentralized cloud computing platform. + +--- + +## **What is Cal.com?** + +**Cal.com** is an open-source, self-hosted scheduling solution that helps individuals and businesses manage appointments and bookings. It's a privacy-first alternative to scheduling tools like Calendly, offering full ownership of data. Key features include: + +- Integration with various calendar systems (Google Calendar, Outlook, etc.) +- Custom branding and themes +- Group scheduling +- Webhooks for advanced workflows + +--- + +## **Why Deploy on Akash?** + +Deploying Cal.com on Akash allows you to: +- Save costs compared to traditional cloud hosting platforms. +- Leverage a decentralized cloud infrastructure. +- Retain full control over your instance of Cal.com. + +--- + +## **Requirements** + +1. **Akash CLI**: Installed and configured. +2. **AKT Tokens**: For deployment. +3. **Cal.com Image**: `calcom/cal.com`. +4. **Domain Name**: Optional but recommended for production use. +5. **Akash SDL Template**: A template to define the deployment. + +--- + +## **Steps to Deploy Cal.com on Akash** + +### **Step 1: Set Up Akash Environment** + +1. 
Install the Akash CLI: + ```bash + curl -sSL https://raw.githubusercontent.com/ovrclk/akash/master/install.sh | sh + ``` + +2. Configure your wallet: + ```bash + akash keys add + ``` + +3. Fund your wallet with AKT tokens. + +4. Initialize the Akash CLI environment: + ```bash + akash provider list + ``` + +--- + +### **Step 2: Create SDL File** + +The **SDL (Stack Definition Language)** file defines the resources and deployment configuration. Below is a sample SDL file for deploying Cal.com: + +```yaml +version: "2.0" + +services: + calcom: + image: calcom/cal.com + env: + DATABASE_URL: + NEXT_PUBLIC_WEBAPP_URL: http:// + NEXTAUTH_SECRET: + NEXT_PUBLIC_TELEMETRY_DISABLED: "1" + expose: + - port: 3000 + as: 80 + to: + - global: true + +profiles: + compute: + calcom: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + akash: + pricing: + calcom: + denom: uakt + amount: 50 + +deployment: + calcom: + calcom: + profile: calcom + count: 1 +``` + +Replace the following placeholders: +- ``: URL of the database where Cal.com will store its data (e.g., PostgreSQL). +- ``: The domain or IP address where the service will be accessible. +- ``: A secret key for authentication. + +--- + +### **Step 3: Deploy to Akash** + +1. **Validate the SDL file**: + ```bash + akash deployment validate .yaml + ``` + +2. **Create the deployment**: + ```bash + akash tx deployment create .yaml --from --chain-id + ``` + +3. **Bid on providers**: + Once the deployment is submitted, providers will bid on it. Accept a bid using: + ```bash + akash tx market lease create --dseq --from + ``` + +4. **Check Deployment Status**: + ```bash + akash deployment status --dseq + ``` + +--- + +### **Step 4: Access the Application** + +Once the deployment is active, note the provider's IP or domain in the status output. Use it to access your Cal.com instance. 
+ +For production, set up a reverse proxy like **NGINX** and configure SSL certificates using **Let's Encrypt** for HTTPS. + +--- + +## **Optional: Connect Cal.com to PostgreSQL** + +To run Cal.com, you need a database. You can deploy PostgreSQL on Akash or use an external database service like **AWS RDS** or **Google Cloud SQL**. Update the `DATABASE_URL` in your SDL file accordingly. + +--- + +## **Troubleshooting** + +- **Logs**: Use the Akash CLI to fetch logs for debugging: + ```bash + akash deployment logs --dseq + ``` + +- **Configuration Issues**: Verify the environment variables and database connectivity. + +--- + +By following this guide, you can deploy and manage your own instance of Cal.com on Akash Network, combining the benefits of decentralized hosting and open-source scheduling. \ No newline at end of file diff --git a/src/content/Docs/guides/hosting/discourse/index.md b/src/content/Docs/guides/hosting/discourse/index.md new file mode 100644 index 00000000..0b44ca2a --- /dev/null +++ b/src/content/Docs/guides/hosting/discourse/index.md @@ -0,0 +1,146 @@ +--- +categories: ["Guides"] +tags: ["Hosting"] +weight: 1 +title: "Guide to Deploying Discourse on Akash" +linkTitle: "Discourse" +--- + + +## **Overview of Discourse** + +**Discourse** is a modern, open-source discussion platform designed to improve online community interactions. Known for its robust features, it combines traditional forum-style discussions with modern tools for engagement. Features include real-time updates, robust moderation tools, extensive customization, and integration capabilities. Discourse is widely used by communities, businesses, and organizations to facilitate meaningful discussions. + +### **Key Features:** +1. **Customizable Design:** Flexible theming options for personalized user experiences. +2. **Real-Time Notifications:** Alerts for replies, mentions, and updates. +3. **Trust System:** Automated moderation based on user behavior. +4. 
**Integration-Friendly:** Seamless integration with services like Slack, WordPress, and more. +5. **Rich API:** Enables advanced automation and integrations. + +By deploying Discourse on Akash, you can leverage the decentralized cloud’s cost-effectiveness, scalability, and security to run your discussion forum in a trustless environment. + +--- + +## **Why Deploy Discourse on Akash Network?** + +**Akash Network** is a decentralized cloud computing platform, offering affordable and censorship-resistant hosting. Deploying Discourse on Akash enables users to run a secure and scalable community platform while reducing dependency on traditional centralized cloud providers. + +**Benefits:** +1. **Cost-Effective:** Save on hosting costs compared to traditional providers. +2. **Scalable:** Dynamically allocate resources to meet traffic demands. +3. **Decentralized:** Resilient against censorship and outages. +4. **Open Source Compatibility:** Easily deploy applications with Docker and Kubernetes support. + +--- + +## **Step-by-Step Deployment Guide** + +### **Prerequisites:** +1. **Akash CLI Installed:** Set up the [Akash CLI](https://docs.akash.network/) for managing deployments. +2. **Discourse Requirements:** + - A domain name with DNS configuration for SSL. + - Minimum of 2GB RAM and 1 CPU for Discourse. + - Docker installed in your environment. +3. **Akash Wallet:** Ensure your wallet is funded with $AKT tokens to pay for deployment. + +--- + +### **1. Prepare Discourse Docker Setup** +Discourse requires a Dockerized setup for deployment. Prepare the necessary Docker image and environment variables. + +- Use the official Discourse image: `discourse/discourse`. +- Set up the following environment variables in a file (e.g., `.env`): + ``` + DISCOURSE_HOSTNAME=forum.yourdomain.com + DISCOURSE_SMTP_ADDRESS=smtp.your-email-provider.com + DISCOURSE_SMTP_PORT=587 + DISCOURSE_SMTP_USER_NAME=your-email@example.com + DISCOURSE_SMTP_PASSWORD=your-password + ``` + +### **2. 
Create the Akash Deployment File** +Write an SDL (Service Definition Language) file that describes your deployment. Here’s an example: + +```yaml +version: "2.0" + +services: + discourse: + image: discourse/discourse:latest + env: + - DISCOURSE_HOSTNAME=forum.yourdomain.com + - DISCOURSE_SMTP_ADDRESS=smtp.your-email-provider.com + - DISCOURSE_SMTP_PORT=587 + - DISCOURSE_SMTP_USER_NAME=your-email@example.com + - DISCOURSE_SMTP_PASSWORD=your-password + expose: + - port: 80 + as: 80 + to: + - global: true + resources: + cpu: + units: 2 + memory: + size: 2Gi + storage: + size: 20Gi + +profiles: + compute: + discourse: + resources: + cpu: + units: 2 + memory: + size: 2Gi + storage: + size: 20Gi + placement: + akash: + attributes: + region: us-west + +deployment: + discourse: + discourse: + profile: discourse + count: 1 +``` + +### **3. Deploy on Akash** +1. **Create a Deployment:** + - Run the command to create your deployment: + ```bash + akash tx deployment create deployment.yaml --from --chain-id --node + ``` + - Confirm the transaction and note the deployment ID. + +2. **Bid on Resources:** + - Wait for providers to bid on your deployment and accept a bid: + ```bash + akash tx market lease create --dseq --from --chain-id --node + ``` + +3. **Access Your Deployment:** + - Get the external IP address assigned to your Discourse instance. Update your DNS records to point to this IP. + +### **4. Set Up SSL** +Use a tool like **Certbot** to generate SSL certificates or integrate Let's Encrypt to secure your Discourse instance. Update your Nginx or Traefik configuration for SSL termination. + +### **5. Final Configuration** +- Access your Discourse forum via the browser at `http://forum.yourdomain.com`. +- Follow the setup wizard to complete the configuration. +- Customize your forum with themes, plugins, and settings. + +--- + +## **Maintenance Tips** +- **Monitor Usage:** Regularly monitor resource usage and scale as needed. 
+- **Backup Data:** Use Akash’s storage features or external storage solutions like Filecoin or S3-compatible storage for regular backups. +- **Update Discourse:** Periodically update the Docker image to stay current with Discourse releases. + +--- + +By following this guide, you can host a robust, scalable, and decentralized Discourse forum on Akash Network, unlocking a new level of cost-efficiency and resilience for your community. \ No newline at end of file diff --git a/src/content/Docs/guides/hosting/ghost/index.md b/src/content/Docs/guides/hosting/ghost/index.md new file mode 100644 index 00000000..93100497 --- /dev/null +++ b/src/content/Docs/guides/hosting/ghost/index.md @@ -0,0 +1,195 @@ +--- +categories: ["Guides"] +tags: ["Hosting"] +weight: 1 +title: "Guide to Creating a Custom Website with Ghost and Deploying to Akash" +linkTitle: "Ghost" +--- + + +**Ghost** is an open-source platform for professional content creators. It's widely used for blogging, newsletters, and websites that prioritize simplicity and performance. Ghost is built on **Node.js** and offers features such as: + +1. **Modern Editing**: Markdown-based editor for creating visually rich content. +2. **Built-in SEO and Social Tools**: Simplifies optimization for search engines and social media. +3. **Custom Themes**: Flexibility to create unique designs with handlebars templates or buy premium themes. +4. **APIs for Custom Integration**: Ghost provides robust APIs for integrating with other tools. +5. **Self-hosting or Managed Hosting**: You can host Ghost on your own server or use Ghost Pro (managed hosting). + +--- + +## Steps to Create and Deploy a Custom Ghost Website to Akash + +### 1. **Set Up Ghost Locally** +#### Install Ghost CLI +Ghost CLI is a command-line tool for installing and managing Ghost. + +```bash +npm install -g ghost-cli +``` + +#### Create a New Ghost Instance +1. 
Create a directory for your project and navigate to it: + ```bash + mkdir my-ghost-site && cd my-ghost-site + ``` +2. Install Ghost: + ```bash + ghost install local + ``` +3. Access the local site in your browser at `http://localhost:2368`. + +#### Customize Your Ghost Website +1. **Choose or Create a Theme**: + - Download a theme from [Ghost Marketplace](https://ghost.org/marketplace/). + - Or, create a custom theme following the [Ghost Theme Documentation](https://ghost.org/docs/themes/). + + Place the theme in the `content/themes` directory. + +2. **Activate the Theme**: + - Access the admin panel at `http://localhost:2368/ghost`. + - Upload and activate your theme under "Settings > Design." + +3. Add content, configure SEO settings, and preview your website. + +--- + +### 2. **Prepare Ghost for Deployment** +1. **Export Data (Optional)**: + If you already have content, export it from the admin panel (`Settings > Labs > Export`). + +2. **Set Up Production Configuration**: + Update the `config.production.json` file with your production settings: + ```json + { + "url": "https://your-domain.com", + "server": { + "port": 2368, + "host": "0.0.0.0" + }, + "database": { + "client": "sqlite3", + "connection": { + "filename": "/path/to/ghost/content/data/ghost.db" + } + }, + "mail": { + "transport": "Direct" + }, + "logging": { + "level": "info" + }, + "process": "systemd" + } + ``` + +--- + +### 3. **Package Ghost for Akash** +#### Create a Dockerfile +Build a Docker image to containerize Ghost for Akash. + +```dockerfile +FROM ghost:latest + +# Set the working directory +WORKDIR /var/lib/ghost + +# Copy custom content +COPY ./content /var/lib/ghost/content + +# Expose the Ghost port +EXPOSE 2368 + +# Start Ghost +CMD ["npm", "start"] +``` + +#### Build and Push the Docker Image +1. Build the Docker image: + ```bash + docker build -t your-dockerhub-username/ghost-custom . + ``` +2. 
Push the image to Docker Hub: + ```bash + docker push your-dockerhub-username/ghost-custom + ``` + +--- + +### 4. **Deploy Ghost on Akash** +#### Install Akash CLI +Follow the installation instructions from the [Akash CLI Guide](deployments/akash-cli/overview/). + +#### Create an SDL File +Define the deployment parameters in an SDL file (`deploy.yaml`): + +``` +version: "2.0" + +services: + ghost: + image: your-dockerhub-username/ghost-custom:latest + expose: + - port: 2368 + as: 80 + to: + - global: true + +profiles: + compute: + ghost: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + default: + attributes: + region: us-west + signedBy: + anyOf: + - "akash1abcd...xyz" + pricing: + ghost: + denom: uakt + amount: 100 + +deployment: + ghost: + profiles: + - compute: ghost + placement: default + count: 1 +``` + +#### Deploy to Akash +1. Deploy the application: + ```bash + akash tx deployment create deploy.yaml --from your-wallet + ``` +2. Check the status of your deployment: + ```bash + akash query deployment list --owner your-wallet-address + ``` + +#### Point a Domain to Your Akash Deployment +1. Get the public IP of your deployment. +2. Configure your DNS settings to point your domain to this IP. + +--- + +### 5. **Verify and Maintain** +1. Access your website using your domain. +2. Monitor logs to ensure smooth operation: + ```bash + docker logs -f + ``` +3. Update Ghost or your custom theme when needed. + +--- + +This guide gives you a flexible and cost-effective way to host your Ghost website on the Akash decentralized cloud. 
\ No newline at end of file diff --git a/src/content/Docs/guides/hosting/wordpress/index.md b/src/content/Docs/guides/hosting/wordpress/index.md new file mode 100644 index 00000000..62e61752 --- /dev/null +++ b/src/content/Docs/guides/hosting/wordpress/index.md @@ -0,0 +1,169 @@ +--- +categories: ["Guides"] +tags: ["Hosting"] +weight: 1 +title: "Deploying WordPress on Akash Network" +linkTitle: "WordPress" +--- + +## Guide to Create and Deploy a Custom and WordPress Website on Akash Network Using a Single SDL File + +This guide provides step-by-step instructions to set up a custom and WordPress website and deploy it to Akash using a single SDL (Service Deployment Language) file. The SDL will configure a full installation environment, including the web server, database, and WordPress. + +--- + +## Prerequisites +1. **Akash CLI installed**: Ensure you have the Akash CLI installed and configured. +2. **Akash account funded**: Your Akash wallet should have sufficient funds for deployment. +3. **Domain setup**: Optionally, set up a domain with DNS pointing to your Akash deployment. +4. **Docker familiarity**: Basic understanding of Docker containers as Akash uses containerized workloads. +5. **Akash SDL template**: Use a preconfigured SDL format for Akash deployments. + +--- + +## Step 1: Write the SDL File + +Below is an SDL file that includes both a WordPress installation and a MySQL database, deployed in a single setup. 
+ +```yaml +version: "2.0" + +services: + wordpress: + image: wordpress:latest + env: + - WORDPRESS_DB_HOST=mysql:3306 + - WORDPRESS_DB_USER=wp_user + - WORDPRESS_DB_PASSWORD=wp_password + - WORDPRESS_DB_NAME=wp_database + expose: + - port: 80 + as: 80 + to: + - global: true + depends_on: + - mysql + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + + mysql: + image: mysql:5.7 + env: + - MYSQL_ROOT_PASSWORD=root_password + - MYSQL_DATABASE=wp_database + - MYSQL_USER=wp_user + - MYSQL_PASSWORD=wp_password + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + +profiles: + compute: + wordpress: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + mysql: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + placement: + akash: + attributes: + region: global + pricing: + wordpress: + denom: uakt + amount: 100 + mysql: + denom: uakt + amount: 50 + +deployment: + wordpress: + akash: + profile: wordpress + count: 1 + mysql: + akash: + profile: mysql + count: 1 +``` + +--- + +## Step 2: Customize the SDL File + +- **Database Credentials**: Update the environment variables in the SDL (`MYSQL_ROOT_PASSWORD`, `MYSQL_USER`, `MYSQL_PASSWORD`, etc.). +- **Storage Size**: Adjust the `storage.size` parameter for both services based on your expected website and database usage. +- **CPU and Memory**: Allocate appropriate `cpu` and `memory` resources depending on your workload. + +--- + +## Step 3: Deploy to Akash Network + +1. **Initialize Deployment**: + ```bash + akash tx deployment create --from --node --chain-id + ``` + +2. **Query Lease**: + Find available providers and create a lease: + ```bash + akash query market lease list --owner + akash tx market lease create --dseq --oseq --gseq --from + ``` + +3. **Verify Deployment**: + Ensure the deployment is active and the services are running: + ```bash + akash query deployment get --owner --dseq + ``` + +4. 
**Access the Website**: + - Obtain the deployment's public IP or domain: + ```bash + akash query provider lease-status --owner --dseq --provider + ``` + - Configure DNS to map your domain to the provided IP or access via the generated IP. + +--- + +## Step 4: Complete WordPress Setup + +1. Open the WordPress installation URL in your browser (`http://`). +2. Follow the on-screen instructions to: + - Set up the admin account. + - Configure the website title and language. + - Complete the WordPress installation. + +--- + +## Notes + +- **File Persistence**: To retain WordPress and MySQL data across redeployments, use Akash’s persistent storage or configure external backups. +- **Domain Integration**: Use services like Cloudflare to easily point your domain to the Akash deployment. +- **Security**: Secure your deployment by: + - Updating passwords. + - Configuring HTTPS using reverse proxies like Traefik or Nginx. + +--- + +This setup leverages a single SDL file for ease of deployment, ensuring the entire WordPress stack (web server + database) operates seamlessly on the Akash Network. \ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/anythingllm/index.md b/src/content/Docs/guides/machine-learning/anythingllm/index.md new file mode 100644 index 00000000..8b906649 --- /dev/null +++ b/src/content/Docs/guides/machine-learning/anythingllm/index.md @@ -0,0 +1,121 @@ +--- +categories: ["Guides"] +tags: ["Machine Learning","LLMs"] +weight: 1 +title: "Deploying AnythingLLM on Akash Network" +linkTitle: "AnythingLLM" +--- + +**AnythingLLM** is an open-source solution designed to enhance productivity by offering fine-tuned language models tailored to specific tasks or datasets. By hosting and running your own instance of AnythingLLM, you can utilize its capabilities for natural language processing (NLP) tasks like text summarization, question answering, or generating context-aware responses. 
+ +- **Features:** + - User-friendly API for language model interaction. + - Fine-tuning capabilities for specific datasets. + - Flexible deployment on various platforms. + +**Akash Network** is a decentralized cloud computing platform that provides an affordable, efficient, and censorship-resistant environment to host applications like AnythingLLM. + +--- + +## Prerequisites + +1. **Install Akash CLI**: + - Download and install the [Akash CLI](https://docs.akash.network/guides/install-cli). +2. **Set Up Your Wallet**: + - Create a wallet and fund it with AKT tokens. + - Follow the [wallet setup guide](https://docs.akash.network/guides/wallet-setup). +3. **Akash Deployment Account**: + - Ensure you have a deployment account set up with the Akash CLI. +4. **Docker Image**: + - We will use the Docker image `mintplexlabs/anythingllm` for deployment. + +--- + +## Sample SDL for Deploying AnythingLLM on Akash + +Below is a sample Service Descriptor Language (SDL) file that you can use to deploy AnythingLLM on Akash. + +```yaml +--- +version: "2.0" + +services: + anythingllm: + image: mintplexlabs/anythingllm:latest + expose: + - port: 5000 + as: 80 + to: + - global: true + +profiles: + compute: + anythingllm: + resources: + cpu: + units: 500m + memory: + size: 1Gi + storage: + size: 5Gi + + placement: + akash: + pricing: + anythingllm: + denom: uakt + amount: 100 + +deployment: + anythingllm: + akash: + profile: anythingllm + count: 1 +``` + +--- + +## Steps to Deploy AnythingLLM on Akash + +1. **Prepare the SDL File**: + - Save the above SDL file as `deploy.yaml`. + +2. **Validate the SDL**: + Run the following command to validate your SDL file: + ```bash + akash tx deployment validate anythingllm.yaml + ``` + +3. **Create the Deployment**: + Submit the SDL file to the Akash network to create a deployment: + ```bash + akash tx deployment create anythingllm.yaml --from + ``` + +4. **Bid on a Provider**: + After creating the deployment, providers will bid to host it. 
Accept a suitable bid: + ```bash + akash query market lease list --owner + akash tx market lease create --owner --dseq --gseq --oseq --from + ``` + +5. **Access the Application**: + - Once the deployment is live, you can access AnythingLLM at the URL provided by your Akash provider. + - Ensure the application is reachable on the global port (80 as specified in the SDL). + +6. **Monitor Logs**: + View logs to ensure the service is running correctly: + ```bash + akash provider lease logs --dseq --gseq --oseq + ``` + +--- + +## Additional Configuration (Optional) +- **Environment Variables**: Customize AnythingLLM by passing environment variables in the `services` section of the SDL. +- **Storage**: Increase storage if your datasets are large. + +--- + +## Conclusion +By following this guide, you can successfully deploy AnythingLLM on Akash. This deployment leverages Akash's decentralized infrastructure to host your NLP service affordably and securely. \ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/dl4j/index.md b/src/content/Docs/guides/machine-learning/dl4j/index.md new file mode 100644 index 00000000..152703b6 --- /dev/null +++ b/src/content/Docs/guides/machine-learning/dl4j/index.md @@ -0,0 +1,165 @@ +--- +categories: ["Guides"] +tags: ["AI/ML"] +weight: 1 +title: "Guide to Deploy Deeplearning4j on Akash Network" +linkTitle: "Deeplearning4j" +--- + + +## **Overview of Deeplearning4j** + +Deeplearning4j (DL4J) is an open-source, distributed deep-learning framework written for Java and Scala. Designed for enterprise-grade use, DL4J integrates seamlessly with modern big data tools like Apache Spark and Hadoop, enabling powerful deep learning on distributed systems. Its versatility makes it a solid choice for building machine learning pipelines, neural networks, and other AI-based applications. + +### **Key Features** +- **Scalable and Distributed**: Ideal for running on clusters with integration for Spark, Hadoop, and Kubernetes. 
+- **Customizable**: Supports a wide range of neural network architectures, including convolutional neural networks (CNNs), recurrent neural networks (RNNs), and more. +- **Enterprise Integration**: Works well with Java and JVM-based environments for enterprise applications. +- **Cross-Platform**: Runs on Linux, Windows, and macOS. +- **GPU/CPU Support**: Optimized for NVIDIA GPUs with CUDA or CPU-only systems. + +Deploying DL4J on Akash allows you to leverage decentralized cloud computing resources for cost-effective, scalable machine learning. + + + +## **Steps to Deploy Deeplearning4j on Akash** + + +### **Prerequisites** +1. **Akash Wallet**: Set up and funded with $AKT tokens. +2. **Akash CLI**: Installed and configured. +3. **Dockerized DL4J Application**: A Docker image containing your DL4J application. +4. **SDL Template**: The SDL file for deployment. + +--- + +### **Step 1: Prepare a Dockerized Deeplearning4j Application** + +1. Create a Dockerfile for your DL4J application. Below is a sample: + + ```dockerfile + FROM openjdk:11-jdk-slim + + # Install dependencies + RUN apt-get update && apt-get install -y \ + maven \ + && rm -rf /var/lib/apt/lists/* + + # Set working directory + WORKDIR /app + + # Copy project files + COPY . . + + # Build the application + RUN mvn clean package + + # Expose the application port + EXPOSE 8080 + + # Run the application + CMD ["java", "-jar", "target/your-dl4j-app.jar"] + ``` + +2. Build and tag your Docker image: + + ```bash + docker build -t your-dl4j-image . + ``` + +3. Push the Docker image to a registry (e.g., Docker Hub): + + ```bash + docker tag your-dl4j-image username/your-dl4j-image + docker push username/your-dl4j-image + ``` + +--- + +### **Step 2: Create an SDL File for Akash** + +An SDL (Service Definition Language) file defines the deployment configuration. 
Below is a sample SDL for deploying the DL4J Docker container on Akash: + +```yaml +version: "2.0" + +services: + dl4j-service: + image: username/your-dl4j-image:latest + expose: + - port: 8080 + as: 80 + to: + - global: true + +profiles: + compute: + dl4j-profile: + resources: + cpu: + units: 2 + memory: + size: 4Gi + storage: + size: 10Gi + + placement: + dl4j-placement: + attributes: + region: us-west + signedBy: + anyOf: + - akash1xyz... # Replace with your provider's address + pricing: + dl4j-service: + denom: uakt + amount: 100 + +deployment: + dl4j-deployment: + dl4j-profile: + - dl4j-placement +``` + +--- + +### **Step 3: Deploy on Akash** + +1. **Submit the SDL file**: + Use the Akash CLI to submit the SDL file and create a deployment. + + ```bash + akash tx deployment create deploy.yml --from + ``` + +2. **Bid Selection**: + Choose a provider from the bids and accept their offer. + + ```bash + akash query market bid list --owner + ``` + +3. **Lease Creation**: + After selecting the bid, create a lease: + + ```bash + akash tx market lease create --dseq --oseq --gseq --from + ``` + +4. **Access Your Application**: + Once deployed, Akash will provide an external endpoint to access your DL4J service. + +--- + +## **Monitoring and Maintenance** + +- **Logs**: Use the Akash CLI to retrieve service logs. + ```bash + akash provider lease-logs --dseq --gseq --oseq --provider + ``` + +- **Scale Resources**: Modify the SDL file and re-submit for scaling up/down CPU, memory, or storage. + +--- + +By deploying Deeplearning4j on Akash, you can achieve scalable, decentralized, and cost-efficient machine learning workloads while leveraging the flexibility of DL4J and the power of Akash Network. 
\ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/flow/index.md b/src/content/Docs/guides/machine-learning/flow/index.md new file mode 100644 index 00000000..81d21307 --- /dev/null +++ b/src/content/Docs/guides/machine-learning/flow/index.md @@ -0,0 +1,115 @@ +--- +categories: ["Guides"] +tags: ["Artificial Intelligence/Machine Learning"] +weight: 1 +title: "H2O Flow" +linkTitle: "H2O Flow" +--- + +H2O Flow is a tool for machine learning workflows, typically running as part of the H2O.ai suite, and Akash offers a decentralized cloud environment where you can host this application. + +## Step 1: Write the deploy.yaml File for H2O Flow + +1. **Define Basic Configuration** Create a new file named `deploy.yaml`. Inside this file, include the necessary fields that Akash requires, such as version, services, profiles, and deployments. Here’s a sample structure: + +``` +version: "2.0" + +services: + h2o-flow: + image: "h2oai/h2o-open-source-k8s:latest" + args: ["java", "-jar", "/h2o.jar", "-flow_dir", "/h2oflow"] + expose: + - port: 54321 + as: 80 + to: + - global: true + +profiles: + compute: + h2o-flow: + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 5Gi + + placement: + akash: + attributes: + region: "us-west" + +deployments: + h2o-deployment: + h2o-flow: + profile: h2o-flow + count: 1 +``` + +Here’s a breakdown of what each section is doing: + +- **Services**: Defines the H2O Flow service, specifying the Docker image (`h2oai/h2o-open-source-k8s:latest`). This image includes H2O Flow in the /`h2o.jar` file. + +- **Profiles**: Sets the required resources like CPU, memory, and storage. + +- **Deployments**: Specifies how many instances of the service will be created (here, just one). + +2. Save the `deploy.yaml` file in your working directory. 
+ +## Step 2: Install Akash CLI + +If you haven’t already installed the Akash CLI, install it by following these instructions: + +- Download the Akash CLI from their [GitHub Releases page](https://github.com/ovrclk/akash/releases). +- Install the CLI by following the instructions for your operating system. + +## Step 3: Initialize and Fund Your Wallet + +1. **Create a Wallet** if you don’t already have one: + +``` +akash keys add +``` + +2. **Fund Your Wallet**: Get some Akash tokens (AKT) by either purchasing them or using the [faucet](https://faucet.sandbox-01.aksh.pw/) if available. + +3. **Check Your Balance**: + +``` +akash query bank balances +``` + +## Step 4: Deploy on Akash + +1. **Create a Certificate**: + +``` +akash tx cert create client --from --chain-id akashnet-2 --fees 5000uakt +``` + +2. **Create a Deployment**: Run the following command to start the deployment with your deploy.yaml file: + +``` +akash tx deployment create deploy.yaml --from --chain-id akashnet-2 --fees 5000uakt +``` + +3. **View the Deployment Status**: Once deployed, you can check the status by running: + +``` +akash query deployment list --owner +``` + +4. **Get the Service Endpoint**: Once the deployment is live, you will get an external IP or domain through which you can access H2O Flow. Use this to connect to the H2O Flow service. + +## Step 5: Connect to H2O Flow + +1. Open a browser. + +2. Navigate to `http://`. + +3. You should see the H2O Flow interface. + + +This should give you a functional deployment of H2O Flow on Akash! 
diff --git a/src/content/Docs/guides/machine-learning/flowiseai/index.md b/src/content/Docs/guides/machine-learning/flowiseai/index.md new file mode 100644 index 00000000..c1a17493 --- /dev/null +++ b/src/content/Docs/guides/machine-learning/flowiseai/index.md @@ -0,0 +1,132 @@ +--- +categories: ["Guides"] +tags: ["AI/ML", ] +weight: 1 +title: "FlowiseAI" +linkTitle: "FlowiseAI" +--- + +## Overview of FlowiseAI + +FlowiseAI is an open-source, visual tool designed to enable the creation and deployment of LLM (Large Language Model)-powered chatbots. It allows users to connect LLMs with various data sources, customize workflows, and deploy intelligent conversational agents easily. FlowiseAI is well-liked for its intuitive user interface and compatibility with leading LLMs like OpenAI's GPT, Hugging Face models, and more. + +By deploying FlowiseAI on Akash, a decentralized cloud computing platform, you can achieve cost-effective and scalable hosting without relying on centralized cloud providers. + +--- + +## **Deploying FlowiseAI on Akash** + +To deploy FlowiseAI on Akash, follow these steps: + +--- + +### **Step 1: Prerequisites** + +1. **Install Akash CLI**: Ensure the Akash CLI is installed on your local machine. Consult the [Akash CLI installation guide](https://docs.akash.network/guides/install). + +2. **Create an Akash Wallet**: Use the Akash CLI to create a wallet and fund it with AKT tokens. + +3. **Set Up the FlowiseAI Docker Image**: FlowiseAI is distributed as a Docker container. The official image is `flowiseai/flowise`. + +4. **Prepare the SDL File**: Create a deployment specification (SDL file) to describe your application. 
+ +--- + +### **Step 2: Write the SDL File** + +Below is a sample SDL file for deploying FlowiseAI on Akash: + +```yaml +version: "2.0" + +services: + flowiseai: + image: flowiseai/flowise:latest + expose: + - port: 3000 + as: 80 + to: + - global: true + +profiles: + compute: + flowiseai: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + akash: + attributes: + region: us-west + signedBy: + anyOf: + - akash + pricing: + flowiseai: + denom: uakt + amount: 1000 + +deployment: + flowiseai: + flowiseai: + profile: flowiseai + count: 1 +``` + +--- + +### **Step 3: Deploy the Application** + +1. **Validate the SDL File**: + Run the following command to validate the SDL file: + ```bash + akash tx deployment create --from --chain-id + ``` + +2. **Check Deployment Status**: + Use the command below to verify your deployment: + ```bash + akash query deployment list --owner + ``` + +3. **Bid Selection**: + Select a provider from the available bids list and accept the bid to finalize your deployment: + ```bash + akash tx market lease create --from --chain-id --bid-id + ``` + +--- + +### **Step 4: Access FlowiseAI** + +1. After the deployment becomes active, retrieve the access details (e.g., domain or IP address) from the provider's dashboard or the Akash CLI. + +2. Open a browser and navigate to the provided URL to access the FlowiseAI interface. + +--- + +### **Step 5: Customize FlowiseAI** + +1. **Configure Workflows**: + Log in to the FlowiseAI interface to set up LLM workflows and connect data sources. + +2. **Add Integrations**: + Integrate with OpenAI, Hugging Face, or other services by configuring API keys in the FlowiseAI dashboard. + +3. **Deploy Chatbots**: + Utilize the platform to deploy and test your chatbot in production settings. + +--- + +## Benefits of Deploying FlowiseAI on Akash + +- **Decentralized Hosting**: Minimize reliance on centralized cloud providers. 
+- **Cost Efficiency**: Benefit from competitive pricing in the Akash marketplace. +- **Scalability**: Easily adjust resources based on demand using the Akash platform. + +By following this guide, you can quickly deploy FlowiseAI on Akash and start building powerful LLM-powered chatbots. \ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/h2o/index.md b/src/content/Docs/guides/machine-learning/h2o/index.md new file mode 100644 index 00000000..5cbb67af --- /dev/null +++ b/src/content/Docs/guides/machine-learning/h2o/index.md @@ -0,0 +1,148 @@ +--- +categories: ["Guides"] +tags: ["Artificial Intelligence/Machine Learning"] +weight: 1 +title: "H2O" +linkTitle: "H2O" +--- + +[H2O](https://h2o.ai/) is an in-memory platform for distributed, scalable machine learning. H2O uses familiar interfaces like R, Python, Scala, Java, JSON and the Flow notebook/web interface, and works seamlessly with big data technologies like Hadoop and Spark. + +This guide covers deploying the H2O-3 (the third incarnation of H2O, and the successor to H2O-2) open-source machine learning platform on the Akash network. We'll use a sample SDL (Stack Definition Language) template and demonstrate deployment through both the Akash CLI and the Akash Console. + +## Prerequisites + + - Akash Network Account: Set up your wallet and fund it with AKT. + - Akash CLI: Installed and configured on your machine. + - Akash Console: Accessible at console.akash.network. + - H2O-3 Docker Image: We'll use the official Docker image for H2O. 
+ +## Step 1: Sample SDL Template + +The following SDL template defines the deployment of H2O-3: + +``` +version: "2.0" + +services: + h2o3: + image: h2oai/h2o-open-source-ai:latest + env: + - H2O_DRIVER_PORT=54321 + - H2O_WEB_PORT=54322 + expose: + - port: 54321 + as: 80 + accept: + - 0.0.0.0/0 + to: + - global + - port: 54322 + as: 443 + accept: + - 0.0.0.0/0 + to: + - global + +profiles: + compute: + h2o3: + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 5Gi + + placement: + akash: + attributes: + region: us-west + pricing: + h2o3: + denom: uakt + amount: 1000 + +deployment: + h2o3: + akash: + profile: h2o3 + count: 1 +``` + +## Step 2: Deploy Using Akash CLI + +1. **Create the Deployment** + +Save the above SDL template as `deploy.yaml`. Run the following commands to deploy via the CLI: + +``` +akash tx deployment create deploy.yml --from --chain-id --node --gas-prices --gas-adjustment +``` + +2. **Bid on the Deployment** + +Fetch the deployment status: + +``` +akash query market lease list --owner --dseq +``` + +Look for active bids and create a lease with: + +``` +akash tx market lease create --owner --dseq --gseq --oseq --from --chain-id --node --gas-prices --gas-adjustment +``` + +3. **Access Your Deployment** + +Once the lease is active, query the lease status to find the assigned external IP: + +``` +akash query market lease status --owner --dseq --gseq --oseq --provider +``` + +Visit <`assigned_IP`>:80 for the H2O API and <`assigned_IP`>:443 for the web UI. + +## Step 3: Deploy Using Akash Console + +1. **Login**: + + - Visit [Akash Console](https://console.akash.network/) and log in using your Akash wallet. + +2. **Create Deployment**: + + - Navigate to "Deployments" and click "Create Deployment." + - Upload the `deploy.yaml` file or paste the SDL directly. + +3. **Review and Submit**: + + - Review the SDL details. + - Submit the deployment and wait for bids. + +4. 
**Select a Bid**: + + - Once bids are available, select the most suitable provider based on pricing and region. + +5. **Access Your Deployment**: + + - After the lease is created, the provider will assign an external IP. Use this IP to access the H2O services. + +## Step 4: Testing and Validation + + - **API Test**: Access the H2O-3 REST API at `http://:80/3/`. + + - **Web UI Test**: Visit` https://:443/` for the H2O-3 web interface. + + - **Monitor Logs**: Use the Akash CLI to view logs for the deployment: + + ``` + akash logs --dseq --gseq --oseq --provider --from + ``` + +## Additional Notes + +- Ensure the `uakt` balance in your wallet can cover deployment and lease costs. +- Adjust compute resources in the SDL file if H2O-3 requires more memory or CPU. +- Use SSL/TLS if deploying in production to secure the web interface. \ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/h2ogpt/index.md b/src/content/Docs/guides/machine-learning/h2ogpt/index.md new file mode 100644 index 00000000..421da79d --- /dev/null +++ b/src/content/Docs/guides/machine-learning/h2ogpt/index.md @@ -0,0 +1,142 @@ +--- +categories: ["Guides"] +tags: ["Artificial Intelligence/Machine Learning"] +weight: 1 +title: "h2oGPT" +linkTitle: "h2oGPT" +--- + +h2oGPT is an Apache V2 open-source project that allows you to query and summarize your documents or just chat with local private GPT LLMs. + + +## Prerequisites + +1. **Akash Account**: Ensure you have an active Akash wallet with AKT tokens for deployment costs. +2. **Akash CLI**: Installed and configured on your system. +3. **Dockerized h2oGPT**: h2oGPT container image available (e.g., from Docker Hub or built locally). +4. **Akash Console**: Access to the [Akash Console](https://console.akash.network/). + +## Part 1: Create a Docker Image for h2oGPT + +1. Clone the Repository: +``` +git clone https://github.com/h2oai/h2ogpt.git +cd h2ogpt +``` + +2. 
Build the Docker Image: Ensure Docker is installed and running on your machine, then build the image. + +``` +docker build -t h2ogpt:latest . +``` + +3. Test the Docker Image Locally (Optional): Run the Docker container to confirm that h2oGPT is working correctly. + +``` +docker run -p 7860:7860 h2ogpt:latest +``` +Open your browser and navigate to `http://localhost:7860` to confirm the app runs successfully. + +4. Push the Docker Image to a Registry: Tag and push the image to a container registry like Docker Hub or GitHub Packages. +``` +docker tag h2ogpt:latest /h2ogpt:latest +docker push /h2ogpt:latest +``` + + + +## Part 2: Create an SDL File + +Below is an example SDL file to deploy h2oGPT. + +``` +--- +version: "2.0" + +services: + h2ogpt-service: + image: /h2ogpt:latest + expose: + - port: 7860 + as: 80 + to: + - global: true + env: + - H2O_ENV_VARIABLE_1=value1 # Replace with actual environment variables + - H2O_ENV_VARIABLE_2=value2 +profiles: + compute: + h2ogpt-profile: + resources: + cpu: + units: 4 + memory: + size: 8Gi + storage: + size: 20Gi + placement: + h2ogpt-placement: + attributes: + region: us-west # Change based on preference + signedBy: + anyOf: + - akash1 +deployment: + h2ogpt-deployment: + h2ogpt-service: + profile: h2ogpt-profile + count: 1 +``` + +## Part 3: Deploy Using Akash CLI + +1. Install Akash CLI: Follow the official [Akash CLI setup guide](docs/getting-started/quickstart-guides/akash-cli/). + +2. Fund Your Wallet: Ensure your Akash wallet is funded with AKT for deployment fees. + +3. Create and Send the Deployment: + + - Save the SDL file as deploy.yaml. + - Create the deployment: + ``` + akash tx deployment create h2ogpt-deployment.yaml --from --node https://rpc.akashnet.io:443 --chain-id akashnet-2 + ``` + - Query for bids: + ``` + akash query market bid list --owner --node https://rpc.akashnet.io:443 + ``` + - Accept a bid: + ``` + akash tx market lease create --owner --dseq --gseq --oseq --provider --from + ``` +4. 
Retrieve Deployment Logs: Use the following command to check the logs: + +``` +akash provider lease-logs --provider --dseq --gseq --oseq +``` + +5. Access h2oGPT: Find the deployment's external IP or domain in the logs, then navigate to it in your browser. + +## Part 4: Deploy Using Akash Console + +1. Access Akash Console: Open the [Akash Console](https://console.akash.network/) in your browser. + +2. Log In: + + - Import your Akash wallet or create a new one. + - Ensure your wallet is funded with AKT. + +3. Create a Deployment: + + - Click Create Deployment and paste the SDL file created earlier. + - Review the details and deploy. + +4. Monitor Bids: + + - Wait for bids from providers. + - Select a bid and finalize the deployment. + +5. Access h2oGPT: + + - Locate the provider's external IP or domain. + - Open it in your browser to interact with the application. \ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/h2ollmstudio/index.md b/src/content/Docs/guides/machine-learning/h2ollmstudio/index.md new file mode 100644 index 00000000..c33e4146 --- /dev/null +++ b/src/content/Docs/guides/machine-learning/h2ollmstudio/index.md @@ -0,0 +1,169 @@ +--- +categories: ["Guides"] +tags: ["Artificial Intelligence/Machine Learning"] +weight: 1 +title: "H2O LLM Studio" +linkTitle: "H2O LLM Studio" +--- + +![](../../../assets/h20llm.png) + +Here’s a comprehensive guide to deploying the H2O LLM on Akash using your SDL template. The guide covers both the Akash CLI and Akash Console methods. + +## Prerequisites + +1. **Akash Wallet**: Ensure you have an Akash wallet with sufficient $AKT to cover deployment costs and fees. + +2. **Akash CLI**: Install the Akash CLI from the [official documentation](docs/deployments/akash-cli/overview/). + +3. **Akash Console**: Access the Akash Console. + +4. **SDL Template**: Have your SDL template prepared for deploying H2O LLM. 
+ +## Sample SDL Template for H2O LLM + +``` +version: "2.0" + +services: + h2o-llm: + image: h2oai/h2ogpt:latest # Replace with the specific H2O LLM image if needed + expose: + - port: 8080 + as: 80 + to: + - global: true + env: + - MODEL_NAME=gptj # Replace with the desired model name + - MAX_MEMORY=16g + - NUM_THREADS=4 + resources: + cpu: + units: 2 + memory: + size: 16Gi + storage: + size: 20Gi + +profiles: + compute: + h2o: + resources: + cpu: + units: 2 + memory: + size: 16Gi + storage: + size: 20Gi + placement: + default: + attributes: + region: us-west + pricing: + h2o: + denom: uakt + amount: 100 + +deployment: + h2o-deployment: + h2o-llm: + profile: h2o + count: 1 +``` + +Modify the template as per your requirements for resources and regions. + +## Using Akash CLI + +### Step 1: Create Deployment File + +Save the SDL file as `deploy.yaml` in your project directory. + +### Step 2: Inspect the SDL + +Run the following command to validate the SDL file: + +``` +akash tx deployment create h2o-llm.yml --from --node --chain-id --keyring-backend +``` + +### Step 3: Submit Deployment + +Deploy the service: + +``` +akash tx deployment create ./h2o-llm.yml --from --gas auto --gas-prices 0.025uakt --gas-adjustment 1.2 +``` + +### Step 4: Wait for Providers + +``` +akash query market bid list --owner +``` + +### Step 5: Accept a Bid + +Choose a provider and accept the bid: + +``` +akash tx market lease create \ + --bid-id \ + --from \ + --gas auto \ + --gas-prices 0.025uakt +``` + +### Step 6: Access Your Deployment + +Retrieve the lease status to get the external IP: + +``` +akash provider lease-status \ + --dseq \ + --from \ + --provider +``` + +Access the H2O LLM via the provided IP address. + +## Using Akash Console + +### Step 1: Log In + +1. Open the [Akash Console](https://console.akash.network/). + +2. Connect your wallet and ensure you have funds. + +### Step 2: Upload the SDL File + +1. Click on **New Deployment**. + +2. Upload your `deploy.yaml` file. 
+ +3. Review the parsed configuration. + +### Step 3: Submit Deployment + +1. Confirm the details. + +2. Submit the deployment and wait for provider bids. + +### Step 4: Review Bids + +1. Once providers offer bids, review the pricing and terms. + +2. Select a provider and accept the bid. + +### Step 5: Access Deployment + +1. The deployment will begin provisioning. + +2. Once ready, obtain the external IP from the deployment details. + +3. Access the H2O LLM via the public IP. + +Notes + +- Environment Variables: Adjust MODEL_NAME, MAX_MEMORY, and other variables in the SDL to suit your specific needs. +- Scaling: To scale horizontally, modify the count field in the deployment section. +- Troubleshooting: Use akash logs for debugging issues with the deployment. \ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/haystack/index.md b/src/content/Docs/guides/machine-learning/haystack/index.md new file mode 100644 index 00000000..8d2c3aae --- /dev/null +++ b/src/content/Docs/guides/machine-learning/haystack/index.md @@ -0,0 +1,164 @@ +--- +categories: ["Guides"] +tags: ["Machine Learning","Search"] +weight: 1 +title: "Deploying Haystack on Akash Network" +linkTitle: "Haystack" +--- + + + +This guide outlines how to deploy **Haystack**, a versatile NLP framework for building search systems, on **Akash**, the decentralized cloud computing platform. The deployment uses Akash's `SDL` template for efficient and cost-effective provisioning of compute resources. + +--- + +## **What is Haystack?** + +Haystack, developed by **deepset**, is a framework for building powerful, customizable, and production-ready search systems powered by natural language processing (NLP). It supports a variety of use cases, such as question answering, semantic search, and document indexing. Some of Haystack’s features include: + +- **Search Pipelines**: Build pipelines to handle questions and retrieve documents. 
+- **Multiple Models**: Integrate models like BERT, RoBERTa, and others for document processing and Q&A. +- **Pluggable Components**: Mix and match retrievers, readers, and indexing tools. +- **Backend Integration**: Works seamlessly with Elasticsearch, OpenSearch, FAISS, and more. +- **APIs**: Offers REST API endpoints for interfacing with Haystack pipelines. + +For this deployment, we will use the official **deepset/haystack** Docker image. + +--- + +## **Prerequisites** + +1. **Akash CLI**: Install and configure the Akash CLI. +2. **Akash Wallet**: Fund your Akash wallet with sufficient AKT tokens. +3. **Docker**: Ensure Docker is installed on your machine for testing. +4. **SDL Template**: Prepare the SDL file for your Haystack deployment. + +--- + +## **Sample SDL for Haystack Deployment** + +Here is an example of an SDL file to deploy Haystack on Akash: + +```yaml +version: "2.0" + +services: + haystack: + image: deepset/haystack:latest + env: + - WORKERS=1 + - DEBUG=true + expose: + - port: 8000 + as: 80 + accept: + - http + to: + - global + +profiles: + compute: + haystack: + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 5Gi + placement: + westcoast: + attributes: + region: us-west + pricing: + haystack: + denom: uakt + amount: 500 + +deployment: + haystack: + westcoast: + profile: haystack + count: 1 +``` + +--- + + + +## **SDL Breakdown** + +1. **`services.haystack`**: + - Specifies the **Docker image** (`deepset/haystack:latest`) to deploy. + - Sets environment variables: + - `WORKERS`: Defines the number of workers handling requests. + - `DEBUG`: Enables debugging mode for troubleshooting. + - Exposes port `8000` (mapped to `80`) for external HTTP access. + +2. **`profiles.compute.haystack`**: + - Allocates **resources** for the container: + - `CPU`: 1 unit. + - `Memory`: 2GB RAM. + - `Storage`: 5GB disk space. + +3. **`profiles.placement`**: + - Defines deployment attributes such as **region** and **pricing**. 
+ - This example deploys in the `us-west` region and sets a cost of `500 uakt`. + +4. **`deployment`**: + - Ties the compute profile and placement configuration together. + - Deploys one replica (`count: 1`) of the Haystack service. + +--- + +## **Deployment Steps** + +1. **Validate the SDL**: + Save the SDL as `deploy.yaml` and validate it using the Akash CLI: + ```bash + akash tx deployment create deploy.yaml --from + ``` + +2. **Bid and Lease**: + Once your deployment is created, monitor and accept a bid for your deployment: + ```bash + akash query market lease list --state open + ``` + + After finding a suitable bid, create a lease: + ```bash + akash tx market lease create --dseq --oseq --gseq --from + ``` + +3. **Access Haystack**: + After successful deployment, retrieve the service’s IP and port: + ```bash + akash provider service-status + ``` + + Access the Haystack REST API at the provided IP/port, typically accessible via `http://:80`. + +4. **Test the Deployment**: + Verify that Haystack is running by querying the API: + ```bash + curl http://:80/health + ``` + +--- + +## **Next Steps** + +- **Custom Pipelines**: + Configure Haystack pipelines to index documents or set up specific search functionalities. + +- **Persistent Storage**: + Modify the SDL to use persistent volumes if you need data to persist across restarts. + +- **Scaling**: + Adjust the `count` parameter or resource allocation in the SDL to scale Haystack as needed. + +--- + +## **Conclusion** + +Deploying **Haystack** on Akash offers a decentralized and cost-effective way to leverage the power of NLP-driven search systems. By combining Haystack's flexibility with Akash's decentralized infrastructure, you can deploy scalable and secure AI applications in a production-ready environment. 
\ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/keras/index.md b/src/content/Docs/guides/machine-learning/keras/index.md new file mode 100644 index 00000000..fb0c9845 --- /dev/null +++ b/src/content/Docs/guides/machine-learning/keras/index.md @@ -0,0 +1,143 @@ +--- +categories: ["Guides"] +tags: ["AI/ML", "Training", "Framework"] +weight: 1 +title: "Deploying Keras on Akash Network" +linkTitle: "Keras" +--- + + +This guide provides a step-by-step process for deploying a Keras-based application on the Akash Network. Akash Network is a decentralized cloud computing platform that enables developers to deploy applications affordably and efficiently using a blockchain-based infrastructure. + +--- + +## **Overview of Keras** +Keras is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano. It is widely used for building and deploying deep learning models due to its ease of use, scalability, and compatibility with various backends. + +Typical use cases for deploying Keras applications include: +- Predictive analytics +- Image and video processing +- Natural language processing +- Recommender systems + +When deploying on Akash, Keras applications can leverage the platform's decentralized compute resources, reducing costs and ensuring scalability for production workloads. + +--- + + + +## **Step-by-Step Guide: Deploying Keras on Akash** + +### **Step 1: Prepare Your Keras Application** +1. **Develop Your Application**: Ensure your Keras application is container-ready. For example, it should be structured as a Python script or Jupyter Notebook, compatible with TensorFlow or any required backend. +2. **Dependencies**: + - Install required libraries (e.g., `keras`, `tensorflow`, etc.). + - Define dependencies in a `requirements.txt` file for easy installation. +3. **Save Models**: + - Export your Keras model to a file (e.g., `model.h5`) for production use. 
+ - Include a script for loading and serving the model (e.g., via Flask or FastAPI). + +### **Step 2: Containerize the Application** +1. **Create a Dockerfile**: Write a Dockerfile to package your application and its dependencies. Example: + ```Dockerfile + FROM python:3.9-slim + + # Install dependencies + RUN pip install --no-cache-dir -U pip && \ + pip install flask keras tensorflow + + # Copy application files + COPY app.py /app/ + COPY model.h5 /app/ + + WORKDIR /app + + # Run the application + CMD ["python", "app.py"] + ``` +2. **Build the Docker Image**: + ```bash + docker build -t keras-app . + ``` +3. **Test Locally**: + - Run the container locally to verify it works as expected: + ```bash + docker run -p 5000:5000 keras-app + ``` + - Access the app at `http://localhost:5000`. + +### **Step 3: Write an SDL File for Akash Deployment** +The SDL (Stack Definition Language) file defines the deployment configuration for your Keras application on Akash. + +Here’s an example SDL file: + +```yaml +--- +version: "2.0" + +services: + keras-service: + image: /keras-app:latest + expose: + - port: 5000 + as: 80 + to: + - global: true + +profiles: + compute: + keras-compute: + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 5Gi + placement: + akash: + pricing: + keras-compute: + denom: uakt + amount: 100 +deployment: + keras-deployment: + akash: + profile: keras-compute + count: 1 +``` + +### **Step 4: Deploy on Akash** +1. **Install Akash CLI**: + - Follow the [Akash CLI Installation Guide](https://akash.network/docs/deployments/akash-cli/overview/). +2. **Create an Account**: + - Generate a wallet and fund it with AKT tokens. +3. 
**Upload Your SDL File**: + - Deploy using the Akash CLI: + ```bash + akash tx deployment create deployment.yaml --from + ``` + - Monitor your deployment status with: + ```bash + akash query deployment list --owner + ``` + +### **Step 5: Access Your Keras Application** +- Once the deployment is active, obtain the endpoint for your service using: + ```bash + akash query provider lease-status --id + ``` +- Access your application via the provided public endpoint. + +--- + +## **Best Practices for Deployment** +- **Optimize Your Docker Image**: Use lightweight base images and minimize unnecessary dependencies. +- **Monitor Resource Usage**: Choose appropriate resource profiles in the SDL file to balance cost and performance. +- **Secure API Access**: Use HTTPS and authentication to secure your deployed application. +- **Autoscaling**: Consider scaling resources if your application experiences high traffic. + +--- + +## **Conclusion** +Deploying Keras applications on Akash Network allows you to leverage a cost-effective, decentralized cloud platform. By following this guide, you can deploy, scale, and manage your machine learning workloads efficiently. For advanced configurations, refer to Akash's [documentation](https://docs.akash.network/) and the [Keras API reference](https://keras.io/api/). \ No newline at end of file diff --git "a/src/content/Docs/guides/machine-learning/langflow/index.md\n" "b/src/content/Docs/guides/machine-learning/langflow/index.md\n" new file mode 100644 index 00000000..bc4b70a4 --- /dev/null +++ "b/src/content/Docs/guides/machine-learning/langflow/index.md\n" @@ -0,0 +1,133 @@ +--- +categories: ["Guides"] +tags: ["Machine Learning","NLP"] +weight: 1 +title: "Guide to Deploying LangFlow on Akash" +linkTitle: "LangFlow" +--- + + + +LangFlow is a powerful, user-friendly platform designed to streamline the creation and deployment of natural language processing (NLP) models. 
It simplifies interactions with large language models, enabling developers to experiment with and deploy cutting-edge AI solutions without extensive coding. LangFlow supports integrations with multiple language models, providing an interactive interface for building and managing workflows in NLP applications. + +Key features: +- **Interactive UI:** Build, edit, and deploy NLP workflows visually. +- **Flexible Deployment:** Easily host and manage applications in the cloud. +- **Open Source:** Powered by the community and open for customization. +- **Scalability:** Deploy LangFlow on platforms like Akash for cost-effective, scalable solutions. + +By leveraging the decentralized cloud hosting capabilities of Akash, LangFlow can be deployed in a highly available, cost-efficient environment. + +--- + +## Step-by-Step Guide to Deploy LangFlow on Akash + +### Prerequisites + +1. **Akash CLI:** Ensure you have the Akash CLI installed and configured on your local machine. +2. **Akash Wallet:** Create a wallet and fund it with sufficient AKT tokens. +3. **Deployment Configuration File (SDL):** Use the provided sample SDL for deploying LangFlow. +4. **Docker Image:** The LangFlow Docker image to be used is `langflowai/langflow`. 
+ +--- + +### Deployment Steps + +#### Step 1: Write the SDL File + +Below is the sample SDL file for deploying LangFlow: + +``` +version: "2.0" + +services: + langflow: + image: langflowai/langflow + expose: + - port: 7860 + as: 80 + to: + - global: true + env: + LANGFLOW_HOST: "0.0.0.0" + LANGFLOW_PORT: "7860" + resources: + cpu: + units: 500m + memory: + size: 512Mi + storage: + size: 1Gi + +profiles: + compute: + langflow: + resources: + cpu: + units: 500m + memory: + size: 512Mi + storage: + size: 1Gi + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - "akash.network" + pricing: + langflow: + denom: uakt + amount: 1000 + +deployment: + langflow: + westcoast: + profile: langflow + count: 1 +``` + +#### Step 2: Validate and Deploy the SDL + +1. **Validate the SDL file:** + ```bash + akash tx deployment create deploy.yaml --from --node + ``` + Replace `` with your wallet name and `` with the node you are connected to. + +2. **Query deployment status:** + Once submitted, query the deployment status to ensure it is successfully created: + ```bash + akash query deployment list --owner + ``` + +3. **Accept the Lease:** + Once the deployment is matched with a provider, accept the lease: + ```bash + akash tx market lease create --dseq --from + ``` + +#### Step 3: Access LangFlow + +1. **Get the Deployment URL:** + After accepting the lease, Akash will provide a public IP or hostname. Use it to access the LangFlow interface. + +2. **Access LangFlow:** + Open a browser and navigate to `http://`. You should see the LangFlow interface running. + +--- + +## Customizing LangFlow Configuration + +If needed, you can modify the environment variables in the SDL to customize the deployment, such as: +- `LANGFLOW_HOST`: Host address to bind the application. +- `LANGFLOW_PORT`: Port to expose the application. + +Adjust resource allocations in the `resources` section based on your workload requirements. 
+ +--- + +## Conclusion + +By deploying LangFlow on Akash, you leverage a decentralized, scalable, and cost-efficient cloud platform for hosting NLP workflows. With the sample SDL provided, the deployment process is straightforward, enabling developers to focus on building and experimenting with AI applications without worrying about infrastructure complexity. \ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/localai/index.md b/src/content/Docs/guides/machine-learning/localai/index.md new file mode 100644 index 00000000..78fd743c --- /dev/null +++ b/src/content/Docs/guides/machine-learning/localai/index.md @@ -0,0 +1,152 @@ +--- +categories: ["Guides"] +tags: ["Machine Learning","LLMs"] +weight: 1 +title: "Guide to Deploying **LocalAI** on Akash" +linkTitle: "LocalAI" +--- + +**LocalAI** is an open-source, self-hosted alternative to OpenAI's APIs, providing users with the capability to run LLMs (Large Language Models) locally without relying on external services. It is a lightweight solution designed for privacy-focused deployments, allowing developers to leverage AI features for applications, chatbots, and more without the cost or latency of cloud-based solutions. LocalAI is compatible with various LLMs such as GPT models and can be tailored to fit specific application needs. + +--- + +## Key Features of LocalAI +1. **Privacy**: LocalAI processes all data locally, ensuring sensitive data doesn't leave your infrastructure. +2. **Cost-Effective**: Avoid expensive API fees from cloud providers. +3. **Flexibility**: Supports multiple model formats, making it easy to fine-tune for specific use cases. +4. **Open Source**: Customizable and transparent. + +--- + +## Why Deploy LocalAI on Akash? +**Akash** is a decentralized cloud computing platform where developers can deploy applications at a fraction of the cost compared to traditional providers. 
By deploying LocalAI on Akash, you combine the privacy and flexibility of LocalAI with the decentralized, cost-effective infrastructure of Akash. + +--- + +## Step-by-Step Deployment Guide + +### Prerequisites +1. **Akash Account**: Create an account on [Akash Network](https://akash.network/) and set up your wallet. +2. **Akash CLI**: Install the Akash CLI for managing deployments. +3. **SDL File**: Use the sample SDL file provided below or modify it based on your requirements. +4. **Docker Knowledge**: Familiarity with containerized applications. +5. **LocalAI Image**: Access to the LocalAI Docker image (e.g., `localai/localai:latest`). + +--- + +## Sample SDL File for LocalAI Deployment +The following SDL file is a template for deploying LocalAI on Akash: + +```yaml +version: "2.0" + +services: + localai: + image: localai/localai:latest + env: + - MODEL_PATH=/models + expose: + - port: 8080 + as: 80 + to: + - global: true + resources: + cpu: + units: 1000m + memory: + size: 2Gi + storage: + size: 10Gi + args: + - "serve" + - "--host" + - "0.0.0.0" + - "--port" + - "8080" + +profiles: + compute: + localai: + resources: + cpu: + units: 1000m + memory: + size: 2Gi + storage: + size: 10Gi + placement: + akash: + attributes: + host: akash + signedBy: + anyOf: + - "akash1..." + pricing: + localai: + denom: uakt + amount: 100 + +deployment: + localai: + akash: + profile: localai + count: 1 +``` + +--- + +## Steps to Deploy + +### 1. **Set Up Akash CLI** +- Install the Akash CLI from the [Akash documentation](docs/deployments/akash-cli/overview/). +- Configure your wallet and ensure sufficient funds for deployment. + +### 2. **Prepare the SDL File** +- Save the above SDL file as `deploy.yaml`. +- Adjust resources (CPU, memory, and storage) and pricing as necessary for your application needs. + +### 3. **Validate the SDL File** +Run the following command to validate your SDL file: +```bash +akash tx deployment create deploy.yaml --from --chain-id --node +``` + +### 4. 
**Create the Deployment** +Submit your deployment request: +```bash +akash tx deployment create deploy.yaml --from +``` + +### 5. **Monitor the Deployment** +Check the status of your deployment using: +```bash +akash query deployment list --owner +``` + +### 6. **Access the Application** +- After the deployment is complete, the LocalAI API will be accessible at the exposed endpoint. +- If you’ve made the service global in the SDL file, use the assigned domain or IP to interact with the LocalAI API. + +### 7. **Upload Your Models** +- Use Akash's persistent storage to upload your AI models to `/models` as defined in the `MODEL_PATH` environment variable. + +--- + +## Post-Deployment Configuration +1. **Test the API**: + - Use tools like `curl` or Postman to send requests to your LocalAI API. + - Example: + ```bash + curl -X POST http:///api/v1/chat -d '{"message": "Hello AI"}' + ``` + +2. **Scale Your Deployment**: Modify the `count` parameter in the deployment profile to increase the number of LocalAI instances. + +3. **Optimize Resources**: Based on usage, tweak CPU, memory, and storage allocations in the SDL file. + +--- + +## Conclusion +By deploying **LocalAI** on **Akash**, you gain access to a secure, cost-effective, and scalable environment for running AI models. This deployment is ideal for developers and organizations looking for an affordable and private AI solution. + +For further customization and advanced deployment options, refer to the [LocalAI GitHub repository](https://github.com/localAI) and [Akash Network documentation](https://akash.network/docs). 
\ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/ml-envs/index.md b/src/content/Docs/guides/machine-learning/ml-envs/index.md new file mode 100644 index 00000000..06bed407 --- /dev/null +++ b/src/content/Docs/guides/machine-learning/ml-envs/index.md @@ -0,0 +1,94 @@ +--- +categories: ["Guides"] +tags: ["AI/ML", "Training", "Framework"] +weight: 1 +title: "Machine Learning Environments" +linkTitle: "Machine Learning Environments" +--- + +## **Overview** + +The **nielsborie/machine-learning-environments** Docker image provides a preconfigured environment for machine learning development, containing tools and frameworks such as TensorFlow, PyTorch, Jupyter Notebook, and more. Deploying it on **Akash**, a decentralized cloud computing platform, will allow you to host and utilize this machine-learning environment at a lower cost and with high scalability. + +--- + +## **Steps to Deploy on Akash** + +### 1. **Set Up Akash Environment** + - Install the Akash CLI (`akash`). + - Fund your wallet with $AKT tokens to cover deployment costs. + +### 2. **Write the SDL File** + Create a deployment SDL (Service Definition Language) file to describe the service. Below is an example SDL tailored for deploying the `nielsborie/machine-learning-environments` Docker image: + + ```yaml + version: "2.0" + services: + machine-learning: + image: nielsborie/machine-learning-environments + expose: + - port: 8888 # For Jupyter Notebook + as: 80 + to: + - global: true + - port: 6006 # TensorBoard + as: 6006 + to: + - global: true + profiles: + compute: + machine-learning: + resources: + cpu: + units: 2 # Number of CPU units + memory: + size: 4Gi # Memory allocation + storage: + size: 20Gi # Storage for ML datasets + placement: + akash: + attributes: + host: akash + pricing: + machine-learning: + denom: uakt + amount: 1000 # Cost in uAKT per block + deployment: + machine-learning: + akash: + profile: machine-learning + count: 1 + ``` + +### 3. 
**Upload Datasets or Notebooks** + Use Akash’s persistent storage options or integrate an external cloud storage solution (e.g., S3-compatible storage) to store your datasets or ML notebooks. + +### 4. **Submit Deployment** + - Deploy the environment using the Akash CLI: + ```bash + akash tx deployment create .yaml --from + ``` + - Verify your deployment: + ```bash + akash query deployment list --owner + ``` + +### 5. **Access the Environment** + - Once deployed, you’ll get the **endpoint URL**. Use this to access Jupyter Notebook or other tools within the container. + - For Jupyter, open your browser and navigate to `http://:80`. + +### 6. **Monitor and Scale** + - Use Akash CLI or Akashlytics dashboard to monitor resource usage. + - Scale the environment by modifying the SDL file and redeploying. + +--- + +## **Benefits of Akash Deployment** +1. **Cost-Effective**: Decentralized compute is generally cheaper than traditional cloud platforms. +2. **Customizability**: Modify the SDL file to adjust resources or add services as needed. +3. **Scalability**: Add nodes or scale resources easily. +4. **Decentralization**: Leverage Akash's censorship-resistant infrastructure for hosting ML workloads. + +--- + +This deployment provides a fully operational machine-learning environment accessible from any browser while taking advantage of Akash's decentralized infrastructure for cost savings and flexibility. 
\ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/pytorch/index.md b/src/content/Docs/guides/machine-learning/pytorch/index.md new file mode 100644 index 00000000..3f5d9c83 --- /dev/null +++ b/src/content/Docs/guides/machine-learning/pytorch/index.md @@ -0,0 +1,178 @@ +--- +categories: ["Guides"] +tags: ["Machine Learning"] +weight: 1 +title: "PyTorch" +linkTitle: "PyTorch" +--- + + + +Akash Network is a decentralized cloud platform that enables developers to deploy containerized applications, including machine learning frameworks like PyTorch. This guide will help you deploy PyTorch on Akash using its official Docker image. + +--- + +## **Prerequisites** +1. **Akash CLI Installed**: Ensure the Akash CLI is installed and configured. Follow the [official guide](https://docs.akash.network/) for installation. +2. **Akash Wallet**: Fund your wallet with AKT tokens for deployment. +3. **Docker Knowledge**: Basic understanding of Docker and containerization. +4. **SDL Template**: Akash uses SDL files for defining deployments. +5. **PyTorch Docker Image**: Use the official [PyTorch Docker image](https://hub.docker.com/r/pytorch/pytorch). + +--- + +## **Step 1: Define Your SDL File** + +Create an SDL file (`deploy.yaml`) for your deployment. 
Here's an example configuration: + +``` +--- +version: "2.0" + +services: + pytorch: + image: pytorch/pytorch:latest # Replace with the desired PyTorch tag + args: + - "bash" # Start a bash shell for interaction + env: + - TZ=UTC # Set timezone (optional) + expose: + - port: 8888 # Port for Jupyter Notebook or API access + as: 8888 + to: + - global + resources: + cpu: + units: 2.0 # Adjust CPU units + memory: + size: 4Gi # Adjust memory size + storage: + size: 10Gi # Adjust storage size + +profiles: + compute: + pytorch: + match: + - "provider=akash" + resources: + cpu: + units: 2.0 + memory: + size: 4Gi + storage: + size: 10Gi + placement: + westcoast: + attributes: + region: us-west + pricing: + pytorch: + denom: uakt + amount: 5000 # Adjust based on your budget + +deployment: + pytorch_deployment: + westcoast: + profile: pytorch + count: 1 +``` + +--- + +## **Step 2: Deploy on Akash** + +1. **Initialize Deployment**: + ``` + akash tx deployment create deploy.yaml --from --node --chain-id + ``` + Replace ``, ``, and `` with your Akash configuration. + +2. **Bid for Resources**: + Once the deployment is created, providers will bid to host it. Run the following command to view the bids: + ``` + akash query market bid list --owner + ``` + +3. **Lease Selection**: + Accept a bid to create a lease: + ``` + akash tx market lease create --dseq --gseq 1 --oseq 1 --from + ``` + +4. **Check Deployment Status**: + After creating the lease, check the status of your deployment: + ``` + akash query deployment get --owner --dseq + ``` + +--- + +## **Step 3: Access Your PyTorch Deployment** + +1. **Retrieve Deployment Details**: + Obtain the external IP and port assigned to your deployment: + ``` + akash provider lease-status --dseq --from + ``` + +2. **Connect to the Service**: + - If running a Jupyter Notebook, open a browser and navigate to `http://:8888`. + - If exposing a REST API or serving a model, use the appropriate endpoint. 
+ +--- + +## **Step 4: Verify PyTorch** + +1. SSH into the container: + ``` + ssh -p root@ + ``` + (Use the credentials provided by the provider.) + +2. Start a Python shell and test PyTorch: + ``` + python + >>> import torch + >>> print(torch.__version__) + ``` + +--- + +## **Optional: Customize the Docker Container** + +If you need additional libraries or custom configurations: +1. Create a custom `Dockerfile`: + ```dockerfile + FROM pytorch/pytorch:latest + RUN pip install flask gunicorn # Example: Add Flask and Gunicorn + WORKDIR /workspace + COPY . /workspace + CMD ["bash"] + ``` + +2. Build and push your custom image: + ``` + docker build -t /custom-pytorch:latest . + docker push /custom-pytorch:latest + ``` + +3. Update the `image` field in the SDL file to use your custom image. + +--- + +## **Step 5: Manage and Scale Deployment** + +1. **Update Deployment**: + Modify the `deploy.yaml` file and run: + ``` + akash tx deployment update deploy.yaml --from + ``` + +2. **Stop Deployment**: + ``` + akash tx deployment close --owner --dseq + ``` + +--- + +By following this guide, you can successfully deploy PyTorch on Akash, leveraging the decentralized cloud for your machine learning tasks. \ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/skypilot/index.md b/src/content/Docs/guides/machine-learning/skypilot/index.md new file mode 100644 index 00000000..48c6e17f --- /dev/null +++ b/src/content/Docs/guides/machine-learning/skypilot/index.md @@ -0,0 +1,162 @@ +--- +categories: ["Guides"] +tags: ["AI & ML"] +weight: 1 +title: "SKypilot" +linkTitle: "Skypilot" +--- + +[SkyPilot ](https://docs.skypilot.co/en/latest/docs/index.html) is a framework for running AI and batch workloads on any infra, offering unified execution, high cost savings, and high GPU availability. + +Here’s a step-by-step guide to creating a Akypilot Docker image and deploying it on Akash. + +## Creating the Docker Image + +### Step 1: Clone the Repository + +1. 
Open your terminal and clone the SkyPilot repository: + +``` +git clone https://github.com/skypilot-org/skypilot.git +cd skypilot +``` + +2. Review the repo structure to understand what files and scripts are present: + +``` +ls -la +``` + +### Step 2: Create a Dockerfile + +1. Inside the `skypilot` directory, create a `Dockerfile`: + +``` +touch Dockerfile +``` + +2. Open the file in your favorite text editor and add the following instructions: + +``` +# Use an official Python image as the base +FROM python:3.10-slim + +# Set the working directory +WORKDIR /app + +# Copy the SkyPilot source code into the container +COPY . /app + +# Install dependencies +RUN pip install --no-cache-dir -r requirements.txt + +# Expose the necessary port (if applicable) +EXPOSE 8000 + +# Define the command to run your application +CMD ["python", "skypilot.py"] +``` + +Adjust the `CMD` line based on the entry point for the application. + + +### Step 3: Build the Docker Image + +1. Build the image using Docker: + +``` +docker build -t skypilot-app . +``` +2. Verify the image is built successfully: + +``` +docker images +``` +### Step 4: Push the Docker Image to a Registry + +1. Log in to a container registry (e.g., Docker Hub, GitHub Packages, or others): + +``` +docker login +``` +2. Tag the Docker image: + +``` +docker push /skypilot-app:latest +``` + +### Step 5: Deploy on Akash + +**Prepare the SDL File** + +1. Use the SDL template below to create a `deploy.yaml` file: + +``` +version: "2.0" + +services: + web: + image: /skypilot-app:latest + env: + - PORT=8000 + expose: + - port: 8000 + as: 80 + accept: + - 0.0.0.0/0 + to: + - global +profiles: + compute: + web: + resources: + cpu: + units: 1.0 + memory: + size: 512Mi + storage: + size: 1Gi + placement: + akash: + attributes: + region: us-west + signedBy: + anyOf: + - "akash" + pricing: + web: + denom: uakt + amount: 100 +deployment: + web: + akash: + profile: web + count: 1 + +``` + +2. 
2. Replace `<your-registry>` with your actual Docker Hub username or the registry URL.
**Akash Tokens:** Acquire Akash tokens (AKT) to pay for compute resources. +3. **Dockerized TensorFlow:** Use the official TensorFlow Docker image from Docker Hub. +4. **Domain Configuration (Optional):** If you want to expose the service via a domain, configure DNS appropriately. + +--- + +## **Step-by-Step Guide** + +### **1. Prepare the SDL File** +The SDL (Stack Definition Language) file defines the deployment configuration for Akash. Below is an example for TensorFlow: + +``` +version: "2.0" + +services: + tensorflow-service: + image: tensorflow/tensorflow:latest # Official TensorFlow Docker image + expose: + - port: 8501 # TensorFlow Serving default port + as: 80 + to: + - global + +profiles: + compute: + tensorflow-profile: + resources: + cpu: + units: 1 # Adjust according to your workload + memory: + size: 2Gi # Adjust memory size + storage: + size: 5Gi # Persistent storage size for model files + + placement: + tensorflow-deployment: + attributes: + region: us-west # Specify the region + signedBy: + anyOf: + - akash.network + pricing: + tensorflow-profile: + denom: uakt + amount: 500 # Set the bid price (in uAKT) + +deployment: + tensorflow-deployment: + tensorflow-profile: + count: 1 +``` + +--- + +### **2. Deploy to Akash** +1. **Initialize Deployment:** + ``` + akash tx deployment create deploy.yaml --from --chain-id --node + ``` + +2. **Bid and Accept Lease:** + After submitting the deployment, monitor the bid and accept the lease once a provider is found: + ``` + akash query market bid list + akash tx market lease create --dseq --gseq --oseq --provider --from + ``` + +3. **Verify Deployment:** + Check the status of your deployment: + ``` + akash query deployment get --dseq + ``` + +--- + +### **3. Access TensorFlow Service** +- Once the deployment is active, note the provider's IP address or hostname. +- Access TensorFlow Serving using the specified port (default is `8501`). 
+ +For example: +``` +curl http://:80/v1/models/my_model:predict -d '{"instances": [[1.0, 2.0, 5.0]]}' +``` + +--- + +## **Best Practices** +1. **Resource Scaling:** Optimize `cpu` and `memory` values based on your workload. Use higher resources for training or complex models. +2. **Persistent Storage:** Configure storage volumes if your TensorFlow models require saving/loading data frequently. +3. **Security:** Secure API endpoints with appropriate authentication methods. +4. **Monitoring:** Integrate logs and monitoring tools to track service performance. + +--- + +## **Example Use Cases** +- **Model Training:** Leverage Akash for cost-effective distributed training. +- **Inference Service:** Deploy TensorFlow Serving to handle ML inference requests. +- **Research:** Utilize decentralized infrastructure for ML experiments. + +--- + +## **Conclusion** +By deploying TensorFlow on Akash, you gain access to affordable, decentralized cloud resources while maintaining high performance and scalability. Follow this guide to deploy your TensorFlow workloads seamlessly on Akash. + +For more advanced configurations or issues, consult the [Akash Documentation](docs/) or TensorFlow's [official Docker repository](https://hub.docker.com/r/tensorflow/tensorflow). \ No newline at end of file diff --git a/src/content/Docs/guides/machine-learning/tensorlayer/index.md b/src/content/Docs/guides/machine-learning/tensorlayer/index.md new file mode 100644 index 00000000..f48bd7de --- /dev/null +++ b/src/content/Docs/guides/machine-learning/tensorlayer/index.md @@ -0,0 +1,141 @@ +--- +categories: ["Guides"] +tags: ["AI/ML", "Training", "Framework"] +weight: 1 +title: "Deploy TensorLayer on Akash Network" +linkTitle: "TensorLayer" +--- + + +This guide will walk you through the steps to deploy **TensorLayer** on the Akash Network using the official Docker image. TensorLayer is a versatile deep learning library built on top of TensorFlow. 
With Akash, you can deploy and run TensorLayer workloads in a decentralized and cost-effective manner. + +--- + +## **Prerequisites** +1. **Install Akash CLI** + Ensure you have the Akash CLI installed and configured. Follow the official guide to set up your Akash environment: + [Akash CLI Documentation](docs/deployments/overview/). + +2. **Akash Account** + Ensure you have an Akash wallet funded with sufficient AKT tokens. + +3. **Docker Image** + We will use the official TensorLayer Docker image: + ``` + docker pull tensorlayer/tensorlayer + ``` + +4. **Create an SDL File** + SDL (Stack Definition Language) is used to describe your deployment configuration on Akash. + +--- + +## **Steps to Deploy TensorLayer on Akash** + +### **1. Create an SDL Template** +Create a file called `deploy.yaml` and define your deployment parameters. Below is an example configuration: + +``` +--- +version: "2.0" + +services: + tensorlayer-service: + image: tensorlayer/tensorlayer:latest + expose: + - port: 8888 + as: 8888 + to: + - global: true + env: + - PYTHONUNBUFFERED=1 + args: + - "python3" + - "-m" + - "tensorlayer" + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + +profiles: + compute: + tensorlayer-profile: + match: + attributes: + region: us-west + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + global-placement: + pricing: + tensorlayer-profile: + denom: uakt + amount: 100 + +deployment: + tensorlayer-deployment: + tensorlayer-profile: + - global-placement +``` + +### **2. Deploy the SDL File** +Run the following commands to deploy the SDL file to Akash: + +``` +# Create a deployment +akash tx deployment create deploy.yaml --from --node https://rpc.akash.network:26657 --chain-id akashnet-2 + +# Confirm deployment +akash query deployment list --owner --node https://rpc.akash.network:26657 +``` + +### **3. 
Check the Deployment Status** +Use the following command to check the status of your deployment: + +``` +akash query deployment get --node https://rpc.akash.network:26657 +``` + +### **4. Access the TensorLayer Service** +Once the deployment is successfully running, Akash will provide a public IP address and port. Access TensorLayer via the browser or a tool like `curl`: + +``` +http://:8888 +``` + +--- + +## **Overflow of the Product** +1. **Use Case**: TensorLayer is perfect for building and training AI models in a decentralized environment. Akash allows you to scale computation resources cost-effectively. + +2. **Product Flow**: + - **TensorLayer Setup**: TensorLayer runs on the containerized infrastructure provided by Akash. + - **Environment Configurations**: Customize the Docker container by injecting environment variables, Python scripts, or Jupyter notebooks for your AI workflows. + - **AI Model Deployment**: Deploy AI models directly on TensorLayer and make them accessible through Akash's globally distributed nodes. + +3. **Advantages**: + - Decentralized infrastructure reduces costs compared to traditional cloud providers. + - High availability across Akash’s distributed network. + - Fully customizable deployment using Docker and SDL. + +4. **Potential Use Cases**: + - Model training and inference for natural language processing, image recognition, or predictive analytics. + - Decentralized AI services for applications like chatbots, recommendation systems, and real-time analytics. + +--- + +## **Additional Notes** +- For advanced deployments, integrate persistent storage for large datasets. +- Monitor resource usage using Akash's metrics and update your deployment profile as needed. + +With this guide, you can deploy TensorLayer on Akash and leverage its decentralized infrastructure for cost-efficient AI workloads. 
diff --git a/src/content/Docs/guides/science/geonetwork/index.md b/src/content/Docs/guides/science/geonetwork/index.md new file mode 100644 index 00000000..1c47bdf5 --- /dev/null +++ b/src/content/Docs/guides/science/geonetwork/index.md @@ -0,0 +1,135 @@ +--- +categories: ["Guides"] +tags: ["Cataloging"] +weight: 1 +title: "GeoNetwork" +linkTitle: "GeoNetwork" +--- + + +## Introduction to GeoNetwork +GeoNetwork is an open-source cataloging tool designed to manage spatially referenced resources. It provides a robust platform for geospatial metadata management, enabling organizations to share, find, and use spatial data. Key features include: + +- **Metadata Management:** Create, edit, and manage geospatial metadata. +- **Data Discovery:** Search and access spatial datasets via an intuitive interface. +- **Interoperability:** Supports OGC standards such as CSW, WMS, WFS, and WCS. +- **Integration:** Seamless integration with GIS platforms and web services. + +GeoNetwork is widely used by governments, research institutions, and organizations that need to catalog and disseminate spatial data. + +--- + +## Deployment on Akash + +### Step 1: Prerequisites +1. **Akash Account and Wallet:** Ensure you have an Akash account and wallet set up with sufficient AKT tokens. +2. **Akash CLI:** Install and configure the Akash CLI tool on your machine. +3. **GeoNetwork Docker Image:** Use the official GeoNetwork Docker image from Docker Hub (`geonetwork:latest`). + +--- + +### Step 2: Create a Deployment SDL File +The SDL (Service Definition Language) file defines the deployment. 
Here's a sample SDL file for GeoNetwork: + +``` +version: "2.0" + +services: + geonetwork: + image: geonetwork:latest + expose: + - port: 8080 + as: 80 + to: + - global + env: + - JAVA_OPTS=-Xms512m -Xmx1024m + resources: + cpu: + units: 0.5 + memory: + size: 1Gi + storage: + size: 5Gi + +profiles: + compute: + geonetwork: + resources: + cpu: + units: 0.5 + memory: + size: 1Gi + storage: + size: 5Gi + placement: + global: + attributes: + region: us-west + +deployment: + geonetwork: + geonetwork: + profile: geonetwork + count: 1 +``` + +### Explanation of the SDL File: +- **Services Section:** Specifies the GeoNetwork Docker image and exposes port `8080`. +- **Environment Variables:** Configures the JVM options (`JAVA_OPTS`) for GeoNetwork. +- **Resources:** Allocates 0.5 CPU, 1 GiB of memory, and 5 GiB of storage. +- **Placement:** Specifies the preferred region for deployment. + +--- + +### Step 3: Deploy on Akash +1. **Initialize Deployment:** + Use the Akash CLI to create and deploy the configuration: + ``` + akash tx deployment create deploy.yaml --from + ``` + +2. **View Deployment Status:** + Check the status of your deployment: + ``` + akash query deployment list --owner + ``` + +3. **Accept a Lease:** + Once your deployment is created, accept a provider's lease: + ``` + akash tx market lease create --dseq --from + ``` + +--- + +### Step 4: Access GeoNetwork +After the deployment is live, access GeoNetwork via the external IP provided by the Akash provider. You can use a browser to navigate to: + +``` +http:// +``` + +--- + +### Step 5: Configure GeoNetwork +1. **Login:** The default admin credentials are: + - Username: `admin` + - Password: `admin` +2. **Start Cataloging:** + - Create new metadata records or upload existing ones. + - Configure GeoNetwork to interact with your GIS or data storage solutions. 
+ +--- + +### Step 6: Monitor and Update +- **Logs:** View logs to troubleshoot any issues: + ``` + docker logs + ``` +- **Updates:** To update GeoNetwork, redeploy with the latest Docker image. + +--- + +## Conclusion +Deploying GeoNetwork on Akash provides a decentralized, cost-efficient way to manage and share geospatial data. By leveraging Akash's decentralized cloud, you can ensure scalability and resilience for your geospatial cataloging needs. \ No newline at end of file diff --git a/src/content/Docs/guides/science/matlab/index.md b/src/content/Docs/guides/science/matlab/index.md new file mode 100644 index 00000000..64388d71 --- /dev/null +++ b/src/content/Docs/guides/science/matlab/index.md @@ -0,0 +1,160 @@ +--- +categories: ["Guides"] +tags: ["Numerical Computation"] +weight: 1 +title: "Guide to Deploy MATLAB on Akash" +linkTitle: "MATLAB" +--- + + + +The Akash Network provides a decentralized cloud platform where you can deploy containerized applications. This guide will walk you through deploying MATLAB on Akash using the official MathWorks MATLAB Docker image. + +--- + +## Prerequisites + +1. **MATLAB License**: + - MATLAB requires a valid license. You will need your license server information or network license key for deployment. + +2. **Akash CLI Setup**: + - Install and configure the [Akash CLI](docs/deployments/akash-cli/overview/). + - Fund your Akash wallet with AKT tokens for deployment. + +3. **Docker Knowledge**: + - Familiarity with Docker containers is recommended. + +4. **Access to MATLAB Docker Image**: + - MathWorks provides the MATLAB Docker image on [Docker Hub](https://hub.docker.com/r/mathworks/matlab). + - You need Docker Hub credentials to access the image. + +--- + +## Step 1: Pull the MATLAB Docker Image + +1. Log in to Docker Hub with your credentials: + ``` + docker login + ``` + +2. 
Pull the MATLAB Docker image: + ``` + docker pull mathworks/matlab:latest + ``` + +--- + +## Step 2: Prepare the SDL File for Akash Deployment + +1. Create an SDL file (`deploy.yml`) for MATLAB. Use the following template: + + ```` + version: "2.0" + + services: + matlab: + image: mathworks/matlab:latest + args: + - --licenseserver : + env: + - MATHWORKS_LICENSE_FILE=: + expose: + - port: 8888 + as: 8888 + to: + - global + resources: + cpu: + units: 500 + memory: + size: 512Mi + storage: + size: 1Gi + + profiles: + compute: + matlab: + resources: + cpu: + units: 500 + memory: + size: 512Mi + storage: + size: 1Gi + + deployment: + matlab: + matlab: + profile: matlab + count: 1 + ``` + + - Replace `` and `` with your MATLAB license server details. + +--- + +## Step 3: Deploy to Akash + +1. **Create a Deployment**: + - Submit your SDL file to Akash: + ``` + akash tx deployment create deploy.yml --from + ``` + +2. **Find a Provider**: + - Query for providers willing to host your deployment: + ``` + akash query market bid list --owner + ``` + +3. **Accept a Lease**: + - Accept a lease from a provider: + ``` + akash tx market lease create --from --dseq + ``` + +4. **Access Your Deployment**: + - Get the public endpoint of your deployment: + ``` + akash query market lease get --dseq + ``` + + - Use the endpoint to access your MATLAB application. + +--- + +## Step 4: Verify MATLAB Deployment + +1. Open a browser and navigate to the public endpoint (e.g., `http://:8888`). +2. Verify that MATLAB is running and functional. + +--- + +## Step 5: Monitor and Manage Deployment + +- Use the Akash CLI to check logs and manage your deployment: + ``` + akash logs --dseq --from + ``` + +- If you need to update or terminate the deployment: + ``` + akash tx deployment close --dseq --from + ``` + +--- + +## Notes + +- **Firewall Configuration**: + Ensure the port `8888` is accessible globally or restrict it based on your needs. 
+ +- **MATLAB Licensing**: + MATLAB requires a valid license at runtime. The deployment will fail if the license is invalid or inaccessible. + +- **Resource Optimization**: + Adjust CPU, memory, and storage in the `resources` section of the SDL file based on your workload. + +--- + +This guide provides a basic setup to deploy MATLAB on Akash. Depending on your specific use case, you can further customize the deployment by modifying the SDL file or integrating additional services. \ No newline at end of file diff --git a/src/content/Docs/guides/science/octave/index.md b/src/content/Docs/guides/science/octave/index.md new file mode 100644 index 00000000..ef997600 --- /dev/null +++ b/src/content/Docs/guides/science/octave/index.md @@ -0,0 +1,132 @@ +--- +categories: ["Guides"] +tags: ["Numerical Computation"] +weight: 1 +title: "Guide to Deploy GNU Octave on Akash" +linkTitle: "GNU Octave" +--- + + + +## Introduction to GNU Octave + +GNU Octave is an open-source, high-level programming language primarily used for numerical computations. It provides a convenient command-line interface for solving linear and nonlinear problems numerically and is highly compatible with MATLAB. GNU Octave is extensively used in scientific research, engineering, data analysis, and machine learning for its capabilities in matrix operations, algorithm development, and data visualization. + +### Features of GNU Octave: +- **Matrix Manipulation**: Handle large-scale matrix computations. +- **Optimization**: Perform optimization using built-in or third-party packages. +- **Simulation**: Create simulations for scientific and engineering processes. +- **Toolboxes**: Compatible with toolboxes for specialized functionality, such as MATPOWER for power system simulations. + +In this guide, we’ll deploy **GNU Octave** on the **Akash Network** using the `matpower/octave` Docker image, which provides a complete GNU Octave environment with optimization packages like IPOPT, OSQP, SeDuMi, and SDPT3. 
+ +--- + +## Prerequisites + +1. **Akash CLI Installed**: Ensure you have the Akash CLI installed and configured. Refer to the [Akash Documentation](docs/deployments/akash-cli/overview/) for setup instructions. +2. **AKT Tokens**: Ensure you have AKT tokens in your wallet for deployment. +3. **Account Setup**: Your Akash account should be set up and ready to deploy workloads. + +--- + +## Steps to Deploy GNU Octave on Akash + +### 1. **Prepare the Deployment YAML** + +Below is an example of the deployment YAML file for running GNU Octave on Akash. This will deploy the `matpower/octave` Docker image. + +``` +--- +version: "2.0" + +services: + octave: + image: matpower/octave + expose: + - port: 8080 + as: 80 + to: + - global: true + +profiles: + compute: + octave: + resources: + cpu: + units: 500m + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + akash: + attributes: + host: akash + pricing: + octave: + denom: uakt + amount: 1000 + +deployment: + octave: + profile: + compute: octave + placement: akash + count: 1 +``` + +### 2. **Deploy Using Akash CLI** + +1. Save the above YAML configuration to a file, e.g., `deploy.yaml`. +2. Run the following commands in your terminal: + +```bash +akash tx deployment create deploy.yaml --from --chain-id +``` + +Replace `` and `` with your Akash wallet name and chain ID. + +3. After the deployment is submitted, check the status: + +```bash +akash query market lease list --owner +``` + +4. Once the lease is established, retrieve the access URL for your deployment: + +```bash +akash provider lease-status --dseq --owner +``` + +### 3. **Access GNU Octave** + +After deployment, the `matpower/octave` environment will be accessible via the assigned IP or domain at the specified port (default: 80). + +### 4. **Run GNU Octave Commands** + +Access the container to use GNU Octave directly: + +1. SSH into the running container (using the provider-assigned access). +2. 
Launch the Octave CLI by executing: + +```bash +octave +``` + +Alternatively, modify the deployment to include a persistent storage volume if you wish to save your computations or scripts. + +--- + +## Advanced Configuration + +- **Add Persistent Storage**: Update the YAML to include persistent storage to save scripts and data. +- **Scaling**: Adjust the `count` parameter in the `deployment` section to scale your environment. +- **Expose Additional Ports**: Modify the `expose` section to open other ports for services like a web interface. + +--- + +## Conclusion + +This deployment guide helps you run a complete GNU Octave environment on Akash Network using the `matpower/octave` image. By leveraging Akash's decentralized cloud, you can take advantage of low-cost, scalable computing for your numerical and scientific needs. \ No newline at end of file diff --git a/src/content/Docs/guides/tooling/airflow/index.md b/src/content/Docs/guides/tooling/airflow/index.md new file mode 100644 index 00000000..540a5170 --- /dev/null +++ b/src/content/Docs/guides/tooling/airflow/index.md @@ -0,0 +1,117 @@ +--- +categories: ["Guides"] +tags: ["Workflow Orchestration"] +weight: 1 +title: "Deploying Apache Airflow on Akash: A Step-by-Step Guide" +linkTitle: "Apache Airflow" +--- + + + +## Introduction to Apache Airflow +Apache Airflow is an open-source platform designed for orchestrating workflows. It allows developers to create, schedule, and monitor workflows as directed acyclic graphs (DAGs). Airflow is highly extensible and can be used for a variety of automation tasks. + +### Key Use Cases for Airflow +1. **Data Engineering**: Automating ETL pipelines for data transformation and loading. +2. **Machine Learning Pipelines**: Coordinating training, validation, and deployment of machine learning models. +3. **DevOps**: Managing CI/CD pipelines and system automations. +4. **Analytics**: Scheduling reports and running analytics workflows. +5. 
**Integration**: Orchestrating tasks across multiple services and APIs. + +--- + +## Prerequisites +1. **Akash CLI**: Ensure the Akash CLI is installed and configured. +2. **Docker Knowledge**: Basic understanding of Docker and images. +3. **Apache Airflow Docker Image**: We'll use the official `apache/airflow` image. +4. **SDL Template**: You can use your pre-built SDL template for deploying applications on Akash. + +--- + +## Steps to Deploy Apache Airflow on Akash + +### 1. **Prepare Your SDL File** +Create a `deploy.yaml` file that describes the resources and configurations for your Airflow deployment. Below is a sample SDL file for deploying Apache Airflow: + +``` +version: "2.0" + +services: + airflow: + image: apache/airflow:latest + expose: + - port: 8080 + as: 80 + to: + - global: true + env: + - AIRFLOW__CORE__EXECUTOR=LocalExecutor + - AIRFLOW__CORE__SQL_ALCHEMY_CONN=sqlite:////usr/local/airflow/airflow.db + args: + - airflow webserver + volumes: + - size: 1Gi + resources: + cpu: + units: 500m + memory: + size: 512Mi + +profiles: + compute: + airflow: + resources: + cpu: + units: 500m + memory: + size: 512Mi + storage: + size: 1Gi + placement: + akash: + pricing: + airflow: + denom: uakt + amount: 100 + +deployment: + airflow: + airflow: + profile: airflow + count: 1 +``` + +### 2. **Customize Airflow Configuration** +- Update environment variables under `env` in the SDL file to suit your needs. +- For a production setup, consider using a database like PostgreSQL instead of SQLite. +- Adjust resource requirements under the `resources` section. + +### 3. **Deploy the SDL File to Akash** +Run the following commands to deploy Airflow on Akash: + +1. **Validate Your SDL File**: + ``` + akash deployment validate deploy.yaml + ``` + +2. **Send the Deployment**: + ``` + akash deployment create deploy.yaml + ``` + +3. **Query the Lease**: + Find the lease created for your deployment: + ``` + akash deployment lease-status --dseq + ``` + +4. 
**Access Airflow**: + Once the lease is active, you will receive an external IP address and port. Use this to access the Airflow web server in your browser. + +### 4. **Set Up and Test DAGs** +Once Airflow is running, upload your DAGs to the `/dags` directory in the container (use persistent storage or mount a volume). Test workflows to ensure everything is configured properly. + +--- + +## Conclusion +Deploying Apache Airflow on Akash leverages decentralized computing resources, reducing costs while maintaining scalability. By customizing the SDL template, you can deploy Airflow for various use cases, from data engineering to machine learning. diff --git a/src/content/Docs/guides/tooling/emqx/index.md b/src/content/Docs/guides/tooling/emqx/index.md new file mode 100644 index 00000000..8187c2e6 --- /dev/null +++ b/src/content/Docs/guides/tooling/emqx/index.md @@ -0,0 +1,152 @@ +--- +categories: ["Guides"] +tags: ["Messaging", "MQTT"] +weight: 1 +title: "EMQX" +linkTitle: "EMQX" +--- + +This guide walks you through deploying EMQX (Erlang MQTT Broker) on Akash Network using the official Docker image (`emqx`) and an SDL file for deployment. + +--- + +### **Step 1: Prerequisites** + +1. **Akash CLI Setup**: Ensure you have the Akash CLI installed and configured. + - Follow the [Akash CLI guide](docs/deployments/akash-cli/overview/) to set up your wallet and environment. +2. **Docker Hub Account**: Ensure access to the official `emqx` Docker image. +3. **Akash Provider**: Ensure your Akash provider is active for accepting deployments. 
+ +--- + +### **Step 2: Create the SDL File** + +Here’s a sample SDL file (`deploy.yaml`) for deploying EMQX: + +``` +version: "2.0" + +services: + emqx: + image: emqx/emqx:latest + env: + EMQX_NAME: "emq-node" + EMQX_LISTENER__TCP__DEFAULT: "1883" + EMQX_LISTENER__SSL__DEFAULT: "8883" + EMQX_ADMIN_PASSWORD: "admin" # Set your admin password + expose: + - port: 1883 + as: 1883 + to: + - global + - port: 8883 + as: 8883 + to: + - global + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + +profiles: + compute: + emqx: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + placement: + any-provider: + attributes: + host: akash + signedBy: + anyOf: + - "akash.network" + pricing: + emqx: + denom: uakt + amount: 1000 + +deployment: + emqx: + profile: + compute: emqx + placement: any-provider + count: 1 +``` + +--- + +### **Step 3: Deploy the SDL File** + +1. **Create the Deployment**: + Run the following command to create a deployment using your `deploy.yaml` file: + ``` + akash tx deployment create deploy.yaml --from --node --chain-id + ``` + +2. **Bid Selection**: + Once the deployment is created, choose a provider: + ``` + akash query market bid list --owner + ``` + Accept a bid: + ``` + akash tx market lease create --owner --dseq --oseq --gseq --from + ``` + +3. **Submit Manifest**: + Submit the deployment manifest: + ``` + akash tx deployment send-manifest deploy.yaml --from + ``` + +--- + +### **Step 4: Verify Deployment** + +1. **Get Lease Info**: + ``` + akash query market lease list --owner + ``` +2. **Access Your Service**: + - Use the `EMQX` broker’s exposed ports (`1883` for MQTT, `8883` for MQTT with SSL). + - Obtain the service’s external IP address from the provider. + +--- + +### **Step 5: Test the EMQX Deployment** + +1. **MQTT Client**: + Use any MQTT client to connect to your EMQX deployment: + - Broker URL: `tcp://:1883` + - SSL Broker URL: `ssl://:8883` + +2. 
**Admin Dashboard**: + Access the EMQX dashboard using the IP and appropriate port (default is `18083`): + ``` + http://:18083 + ``` + Login using: + - Username: `admin` + - Password: `` + +--- + +### **Step 6: Manage and Scale** + +1. **Update Deployment**: + Modify the `deploy.yaml` file and submit updates as needed. + +2. **Scaling**: + Increase the `count` in the deployment section to add more EMQX nodes. + +--- + +This setup provides a fully functioning EMQX broker on Akash Network, leveraging the flexibility and decentralized hosting capabilities of Akash. For further customization, refer to the [EMQX documentation](https://www.emqx.io/docs) and Akash’s [deployment guides](docs/deployments/akash-cli/overview/). diff --git a/src/content/Docs/guides/tooling/flink/index.md b/src/content/Docs/guides/tooling/flink/index.md new file mode 100644 index 00000000..9a797675 --- /dev/null +++ b/src/content/Docs/guides/tooling/flink/index.md @@ -0,0 +1,150 @@ +--- +categories: ["Guides"] +tags: ["Data Pipelines"] +weight: 1 +title: "Apache Flink" +linkTitle: "Apache Flink" +--- + +Apache Flink is a powerful, distributed stream and batch data processing framework. It enables developers to build robust, scalable, and low-latency data pipelines. Flink is widely used for real-time data streaming, event-driven applications, and large-scale data analysis in industries such as finance, e-commerce, and telecommunications. + +Key Features of Apache Flink: +- **Stream Processing:** Processes data streams with high throughput and low latency. +- **Batch Processing:** Handles large-scale data processing in offline scenarios. +- **Stateful Computations:** Ensures fault tolerance with state snapshots and recovery. +- **Scalability:** Supports scaling up or down in distributed environments. +- **Connectors:** Integrates with a variety of data sources, including Kafka, HDFS, and databases. 
+ +--- + +## **Steps to Deploy Apache Flink on Akash** +This guide outlines how to deploy Apache Flink on Akash, a decentralized cloud platform, using the official Docker image `flink`. + +--- + +## **Step 1: Prerequisites** +1. **Akash Account:** + - Ensure you have an Akash wallet set up with funds. + - Install the Akash CLI (`akash`) and configure it with your account. + +2. **Docker Knowledge:** + - Familiarity with Docker images and containers is essential. + - Understand the `flink` Docker image. + +3. **Deployment YAML File:** + - You'll need an SDL (Service Deployment Language) file to define the Flink deployment on Akash. + +--- + +## **Step 2: Create the SDL File** +Create a file named `flink-deployment.yml` with the following content: + +```yaml +--- +version: "2.0" + +services: + flink-jobmanager: + image: flink:latest + expose: + - port: 8081 + as: 80 + to: + - global + env: + - JOB_MANAGER_RPC_ADDRESS=flink-jobmanager + args: ["jobmanager"] + + flink-taskmanager: + image: flink:latest + env: + - JOB_MANAGER_RPC_ADDRESS=flink-jobmanager + args: ["taskmanager"] + +profiles: + compute: + flink-jobmanager: + resources: + cpu: + units: 500m + memory: + size: 512Mi + storage: + size: 1Gi + flink-taskmanager: + resources: + cpu: + units: 500m + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + global: + pricing: + flink-jobmanager: 0.01 + flink-taskmanager: 0.01 + +deployment: + flink: + flink-jobmanager: + profile: flink-jobmanager + count: 1 + flink-taskmanager: + profile: flink-taskmanager + count: 2 +``` + +--- + +## **Step 3: Deploy on Akash** +1. **Validate the SDL File:** + Run the following command to ensure the SDL file is valid: + ```bash + akash validate flink-deployment.yml + ``` + +2. **Create a Deployment:** + Deploy the application to Akash: + ```bash + akash tx deployment create flink-deployment.yml --from + ``` + +3. **Wait for the Deployment to Be Approved:** + After submitting your deployment, wait for a provider to accept it. 
You can view the status with: + ```bash + akash query deployment list --owner + ``` + +4. **Access the Flink Dashboard:** + Once the deployment is live, the Flink JobManager dashboard will be accessible via the public URL or IP address on port 80. + +--- + +## **Step 4: Verifying the Deployment** +1. **Flink JobManager:** + Open your browser and navigate to the URL or IP address of the JobManager. You should see the Flink dashboard. + +2. **Submitting a Job:** + Use the Flink CLI or REST API to submit jobs to the cluster. For example: + ```bash + ./bin/flink run -m :8081 -c + ``` + +--- + +## **Step 5: Managing and Scaling** +- **Scaling Up TaskManagers:** + Modify the SDL file and increase the `count` for `flink-taskmanager`. Redeploy with: + ```bash + akash tx deployment update flink-deployment.yml --from + ``` + +- **Logs and Monitoring:** + Use Akash's monitoring tools or access container logs using `kubectl` (if applicable). + +--- + +## **Conclusion** +Deploying Apache Flink on Akash enables decentralized, cost-effective data processing for real-time and batch workloads. By leveraging Akash's decentralized infrastructure and Flink's robust processing capabilities, you can run scalable, low-latency applications for your data-driven needs. \ No newline at end of file diff --git a/src/content/Docs/guides/tooling/jenkins/index.md b/src/content/Docs/guides/tooling/jenkins/index.md new file mode 100644 index 00000000..911a628a --- /dev/null +++ b/src/content/Docs/guides/tooling/jenkins/index.md @@ -0,0 +1,141 @@ +--- +categories: ["Guides"] +tags: ["CI/CD", "DevOps"] +weight: 1 +title: "Jenkins" +linkTitle: "Jenkins" +--- + + + +## Prerequisites +1. **Akash CLI**: Ensure the Akash CLI (`akash`) is installed and configured on your machine. +2. **Akash Account**: You should have an active Akash account with sufficient tokens to fund the deployment. +3. **Akash Wallet**: The wallet must be configured with an active keypair. +4. 
**Docker Knowledge**: Familiarity with the `jenkins/jenkins` Docker image. +5. **Akash SDL Template**: Use your existing SDL template as the base for the deployment. + +--- + +## Step 1: Create the `deploy.yaml` File +Below is an example `deploy.yaml` file for deploying Jenkins on Akash using the `jenkins/jenkins` Docker image. Replace placeholders (`<...>`) with your details. + +```yaml +--- +version: "2.0" + +services: + jenkins: + image: jenkins/jenkins:lts + expose: + - port: 8080 + as: 80 + accept: + - http + to: + - global + - port: 50000 + as: 50000 + accept: + - tcp + to: + - global + env: + - JAVA_OPTS=-Djenkins.install.runSetupWizard=false + args: + entrypoint: ["/bin/tini", "--", "/usr/local/bin/jenkins.sh"] + +profiles: + compute: + jenkins: + resources: + cpu: + units: 2 + memory: + size: 2Gi + storage: + size: 10Gi + placement: + akash: + attributes: + host: akash + pricing: + jenkins: + denom: uakt + amount: 100 + +deployment: + jenkins: + jenkins: + profile: jenkins + count: 1 +``` + +--- + +## Step 2: Validate the SDL File +Before deploying, validate the SDL file to ensure correctness. + +```bash +akash tx deployment create deploy.yaml --from --chain-id --node --fees +``` + +--- + +## Step 3: Deploy to Akash +1. **Create the Deployment**: + Use the Akash CLI to deploy the `jenkins/jenkins` service. + + ```bash + akash tx deployment create deploy.yaml --from --chain-id --node --fees + ``` + +2. **Check Deployment Status**: + After deploying, monitor the status to ensure it’s active. + + ```bash + akash query deployment list --owner + ``` + +3. **Bid Matching**: + Accept a bid for your deployment if necessary. + + ```bash + akash tx market lease create --owner --dseq --oseq --gseq --from --fees + ``` + +--- + +## Step 4: Access Jenkins +1. **Retrieve the Lease Information**: + Obtain the external IP and ports for accessing Jenkins. + + ```bash + akash query market lease list --owner + ``` + +2. 
**Login to Jenkins**: + - Access Jenkins via the provided external IP (e.g., `http://:80`). + - Follow the initial Jenkins setup if required or use the `JAVA_OPTS` configuration in the `deploy.yaml` to skip the setup wizard. + +--- + +## Step 5: Secure Jenkins +1. **Set Up an Admin User**: + - Once Jenkins is running, create an admin user to secure your instance. +2. **Install Plugins**: + - Install necessary plugins via the Jenkins dashboard. +3. **Configure Firewall Rules**: + - Use Akash’s security groups or your infrastructure to limit access to Jenkins. + +--- + +## Notes +1. **Data Persistence**: + Ensure that the `storage` size specified in the SDL file is sufficient to store Jenkins data. +2. **Scaling**: + Modify the `deployment` section in the SDL file to scale the Jenkins instance if needed. +3. **Budget Management**: + Monitor your wallet balance to ensure uninterrupted service. + +This guide ensures you can deploy Jenkins efficiently while leveraging Akash's decentralized cloud services. \ No newline at end of file diff --git a/src/content/Docs/guides/tooling/jetbrains_hub/index.md b/src/content/Docs/guides/tooling/jetbrains_hub/index.md new file mode 100644 index 00000000..8e607155 --- /dev/null +++ b/src/content/Docs/guides/tooling/jetbrains_hub/index.md @@ -0,0 +1,143 @@ +--- +categories: ["Guides"] +tags: ["User Management"] +weight: 1 +title: "JetBrains Hub" +linkTitle: "JetBrains Hub" +--- + + +## **Step 1: Prerequisites** + +Before you begin, ensure the following: + +1. You have an active Akash wallet and some AKT tokens for deployment. +2. The `akash` CLI is installed and configured. +3. Docker is installed for testing the JetBrains Hub container locally (optional but recommended). 
+ +--- + +## **Step 2: Test JetBrains Hub Locally** + +To verify that the Docker image works as expected: + +``` +docker run -d -p 8080:8080 jetbrains/hub +``` + +Access JetBrains Hub at `http://localhost:8080` in your browser. Follow the initial setup wizard if necessary. + +--- + +## **Step 3: Create an SDL Template** + +Here's a sample SDL template for deploying JetBrains Hub on Akash. Replace placeholders with appropriate values, such as your wallet address. + +### `deploy.yaml` +``` +--- +version: "2.0" + +services: + hub: + image: jetbrains/hub:latest + expose: + - port: 8080 + as: 80 + to: + - global: true + env: + - HUB_BASE_URL=https://your-hub-domain.com + - HUB_BACKUP_DIR=/data/backups + args: + - /bin/bash + command: + - -c + - "java -jar /opt/hub/hub.jar" + +profiles: + compute: + hub: + resources: + cpu: + units: 500m + memory: + size: 1Gi + storage: + size: 5Gi + placement: + hub: + attributes: + host: akash + signedBy: + anyOf: + - "akash1yourwalletaddress" + pricing: + hub: + denom: uakt + amount: 100 + +deployment: + hub: + hub: + profile: hub + count: 1 +``` + +--- + +## **Step 4: Deployment Steps** + +1. **Validate the SDL File** + Run the following command to ensure your SDL file is correctly formatted: + ``` + akash validate deploy.yaml + ``` + +2. **Create a Deployment** + Use the Akash CLI to create a deployment: + ``` + akash tx deployment create deploy.yaml --from --node https://rpc.akash.forbole.com:443 --chain-id akashnet-2 + ``` + +3. **Wait for Bidding** + Monitor the status of your deployment using: + ``` + akash query market lease list --owner + ``` + + Once a bid is matched, you'll need to approve it. + +4. **Approve the Lease** + Approve the lease using: + ``` + akash tx market lease create --from + ``` + +5. **Retrieve Access Details** + After deployment, find the public IP of your service: + ``` + akash query market lease status --dseq + ``` + + Look for the service URI under the `services` section. 
+ +--- + +## **Step 5: Configure JetBrains Hub** + +1. Access the JetBrains Hub URL using the public IP or domain assigned by Akash. +2. Complete the setup wizard by configuring the database, admin account, and other settings. + +--- + +## **Optional: Backup and Persistent Data** + +If you want to enable persistent backups for your JetBrains Hub instance: + +- Mount a volume to `/data/backups` in the SDL file. +- Use Akash's persistent storage (requires configuring storage profiles). + +--- + +This guide deploys JetBrains Hub on Akash in a scalable, cost-effective way. \ No newline at end of file diff --git a/src/content/Docs/guides/tooling/kafka/index.md b/src/content/Docs/guides/tooling/kafka/index.md new file mode 100644 index 00000000..4fca1e08 --- /dev/null +++ b/src/content/Docs/guides/tooling/kafka/index.md @@ -0,0 +1,170 @@ +--- +categories: ["Guides"] +tags: ["Stream Processing", "Message Broker"] +weight: 1 +title: "Apache Kafka" +linkTitle: "Apache Kafka" +--- + +Deploying Apache Kafka on Akash using the official `apache/kafka` Docker image involves several steps, including preparing the SDL file, deploying it to the Akash network, and verifying the deployment. Follow the guide below: + +--- + +## **Step 1: Prepare the SDL File** + +Create an SDL (Service Definition Language) file to define your Kafka deployment. 
Here's an example of an SDL file (`kafka-deployment.yml`): + +``` +version: "2.0" + +services: + kafka: + image: apache/kafka:latest + env: + - KAFKA_BROKER_ID=1 + - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://{AKASH_HOST}:9092 + - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 + expose: + - port: 9092 + as: 9092 + to: + - global + + zookeeper: + image: bitnami/zookeeper:latest + env: + - ALLOW_ANONYMOUS_LOGIN=yes + expose: + - port: 2181 + as: 2181 + to: + - global + +profiles: + compute: + kafka: + resources: + cpu: + units: 0.5 + memory: + size: 1Gi + storage: + size: 5Gi + zookeeper: + resources: + cpu: + units: 0.25 + memory: + size: 512Mi + storage: + size: 2Gi + + placement: + akash: + attributes: + region: us-west + signedBy: + anyOf: + - akash + pricing: + kafka: + denom: uakt + amount: 500 + zookeeper: + denom: uakt + amount: 300 + +deployment: + kafka: + profile: kafka + count: 1 + zookeeper: + profile: zookeeper + count: 1 +``` + +**Key Notes:** +- Replace `{AKASH_HOST}` with the hostname or IP of your Akash deployment (it will be assigned later). +- `zookeeper` is required as Kafka relies on it for distributed coordination. +- Adjust resource and pricing configurations based on your requirements. + +--- + +## **Step 2: Deploy the SDL to Akash** + +1. **Install Akash CLI:** + Ensure you have the Akash CLI installed on your system. You can follow [Akash's official installation guide](docs/deployments/akash-cli/overview/). + +2. **Authenticate to Akash:** + ``` + akash wallet import + ``` + +3. **Submit the Deployment:** + ``` + akash tx deployment create kafka-deployment.yml --from + ``` + +4. **Bid on the Deployment:** + Use the Akash CLI to review provider bids and accept a bid: + ``` + akash query market lease list --owner + akash tx market lease create --dseq --from + ``` + +--- + +## **Step 3: Verify Deployment** + +1. 
**Check Logs:** + Use the Akash CLI to view the logs and ensure the services are running: + ``` + akash provider lease-logs --dseq --from + ``` + +2. **Access Kafka:** + Once deployed, Akash will assign an external hostname or IP for your Kafka service. You can retrieve it using: + ``` + akash provider lease-status --dseq --from + ``` + + Use the `KAFKA_ADVERTISED_LISTENERS` address to interact with Kafka clients. + +--- + +## **Step 4: Test the Kafka Deployment** + +Install Kafka's CLI tools on your local machine and configure them to interact with the deployed Kafka broker. Example commands: + +- **Create a Topic:** + ``` + kafka-topics.sh --create --topic test-topic --bootstrap-server :9092 + ``` + +- **Produce Messages:** + ``` + kafka-console-producer.sh --topic test-topic --bootstrap-server :9092 + ``` + +- **Consume Messages:** + ``` + kafka-console-consumer.sh --topic test-topic --from-beginning --bootstrap-server :9092 + ``` + +--- + +## **Step 5: Monitor and Scale** + +- **Monitor Resource Usage:** + Regularly monitor the usage of your deployment to ensure sufficient resources are allocated. + ``` + akash provider lease-status --dseq + ``` + +- **Scale the Deployment:** + Modify the `count` value in the SDL file to scale Kafka or Zookeeper instances, then redeploy. + +--- + +This guide provides a straightforward way to deploy Kafka on Akash using the official Docker image. You can further customize the SDL file or Kafka configuration to suit specific needs. diff --git a/src/content/Docs/guides/tooling/kong/index.md b/src/content/Docs/guides/tooling/kong/index.md new file mode 100644 index 00000000..80fba90d --- /dev/null +++ b/src/content/Docs/guides/tooling/kong/index.md @@ -0,0 +1,146 @@ +--- +categories: ["Guides"] +tags: ["API",] +weight: 1 +title: "Guide to Deploy Kong on Akash" +linkTitle: "Kong" +--- + +Kong is a scalable, open-source API gateway and service mesh designed to manage, secure, and monitor APIs and microservices. 
It offers essential features like load balancing, authentication, rate limiting, caching, and logging, making it highly effective for modern applications such as containerized, cloud-native, and microservices architectures. + +Deploying Kong on Akash enables you to take advantage of a decentralized cloud platform to lower hosting costs while ensuring high availability. + +--- + +## Step 1: Prepare Your Kong Deployment Files + +To deploy Kong on Akash, you must create a Service Deployment Language (SDL) file that specifies how to deploy your application, including its resource requirements and container configuration. + +Here's a sample SDL file for deploying Kong on Akash. + +--- + +## Sample SDL File + +```yaml +version: "2.0" + +services: + kong: + image: kong:latest + expose: + - port: 8000 + as: 80 + to: + - global + - port: 8443 + as: 443 + to: + - global + env: + KONG_DATABASE: "off" + KONG_PROXY_ACCESS_LOG: "/dev/stdout" + KONG_PROXY_ERROR_LOG: "/dev/stderr" + KONG_ADMIN_ACCESS_LOG: "/dev/stdout" + KONG_ADMIN_ERROR_LOG: "/dev/stderr" + KONG_PROXY_LISTEN: "0.0.0.0:8000, 0.0.0.0:8443 ssl" + KONG_ADMIN_LISTEN: "0.0.0.0:8001, 0.0.0.0:8444 ssl" + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + +profiles: + compute: + kong-profile: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + placement: + kong-placement: + attributes: + region: us-west + pricing: + kong: + denom: uakt + amount: 5000 + +deployment: + kong-deployment: + profile: + compute: kong-profile + placement: kong-placement + count: 1 +``` + +--- + +## Step 2: Validate the SDL File + +Before you can deploy, make sure your SDL file is valid: + +1. Save the SDL file as `deploy.yaml`. +2. Validate the SDL file using the Akash CLI: + ```bash + akash tx deployment validate deploy.yaml + ``` + +--- + +## Step 3: Deploy Kong on Akash + +1. 
**Submit the Deployment** + Deploy the SDL file with the Akash CLI: + ```bash + akash tx deployment create deploy.yaml --from + ``` + +2. **Monitor Deployment Status** + To check the status of your deployment: + ```bash + akash query deployment list --owner + ``` + +3. **Accept a Bid** + When a provider bids on your deployment, accept that bid: + ```bash + akash tx deployment lease create --from --dseq --oseq --gseq --provider + ``` + +4. **Access Kong** + After the deployment has been activated, retrieve the access details (IP and port) for your Kong API Gateway by querying the lease: + ```bash + akash query lease status --owner --provider --dseq + ``` + +--- + +## Step 4: Test Kong Deployment + +To ensure Kong is operational: + +1. Access the Kong API Gateway using the provided IP and port. +2. Test the default proxy at port 80 or 443. +3. Check the Kong Admin API, which is typically available on port 8001 or 8444. + +--- + +## Step 5: Manage and Update Deployment + +- **Update Deployment:** Adjust your `deploy.yaml` file and redeploy using the same commands. +- **Stop Deployment:** To cancel your lease and stop the deployment: + ```bash + akash tx deployment close --from --dseq + ``` + +--- + +By deploying Kong on Akash, you can enjoy the benefits of a decentralized, cost-effective network without sacrificing the powerful API management capabilities Kong offers. + diff --git a/src/content/Docs/guides/tooling/lightstreamer/index.md b/src/content/Docs/guides/tooling/lightstreamer/index.md new file mode 100644 index 00000000..5a2fd042 --- /dev/null +++ b/src/content/Docs/guides/tooling/lightstreamer/index.md @@ -0,0 +1,163 @@ +--- +categories: ["Guides"] +tags: ["Messaging", "MQTT"] +weight: 1 +title: "Lightstreamer" +linkTitle: "Lightstreamer" +--- + + +This guide will walk you through deploying Lightstreamer on Akash, utilizing the official Docker image provided by Lightstreamer. + +--- + +### **Prerequisites** +1. 
**Akash CLI Setup**: Ensure you have the Akash CLI installed and configured. Refer to the [Akash documentation](https://docs.akash.network/) for setup instructions. +2. **Docker Image**: Use the official Lightstreamer Docker image (`lightstreamer`) from Docker Hub. +3. **Akash Wallet**: Have an Akash wallet funded with AKT tokens to cover deployment costs. +4. **SDL File Template**: Use an SDL (Stack Definition Language) file for deployment configuration. + +--- + +## **Steps to Deploy Lightstreamer** + +### Step 1: Pull the Official Lightstreamer Docker Image +Ensure you can access the Lightstreamer image by pulling it locally: +``` +docker pull lightstreamer +``` + +--- + +### Step 2: Create an SDL File +Create an SDL file (`deploy.yaml`) that specifies your deployment requirements for Lightstreamer. Below is a template SDL file configured for the Lightstreamer Docker container. + +``` +--- +version: "2.0" + +services: + lightstreamer: + image: lightstreamer + env: + - LS_LOGGER_CONF=/lightstreamer/conf/lightstreamer_log_conf.xml + expose: + - port: 8080 + as: 8080 + to: + - global: true + +profiles: + compute: + lightstreamer: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + akash: + attributes: + host: akash + signedBy: + anyOf: + - akash + pricing: + lightstreamer: + denom: uakt + amount: 100 + +deployment: + lightstreamer: + lightstreamer: + profile: + compute: lightstreamer + placement: akash + count: 1 +``` + +**Explanation:** +- **`image`**: Uses the official Lightstreamer Docker image. +- **`env`**: Sets necessary environment variables (modify as needed for Lightstreamer configuration). +- **`port: 8080`**: Exposes port 8080 for external access. +- **Resources**: Allocates 0.5 CPU, 512 MB RAM, and 1 GB storage. +- **Pricing**: Sets a bid price of 100 uAKT (adjust based on your needs). + +--- + +### Step 3: Deploy to Akash +1. 
**Create the Deployment**: + Use the `akash tx deployment create` command to submit your deployment. + + ``` + akash tx deployment create deploy.yaml --from --chain-id --node + ``` + +2. **Check Deployment Status**: + Verify the status of your deployment to ensure it was accepted: + ``` + akash query deployment list --owner + ``` + +3. **Accept a Bid**: + Once your deployment is active, accept a provider’s bid: + ``` + akash tx deployment lease create --from --chain-id --node + ``` + +4. **Query Lease Status**: + Confirm the lease has been established: + ``` + akash query market lease list --owner + ``` + +--- + +### Step 4: Access the Lightstreamer Service +Once the lease is active, find the external IP and port assigned to your deployment: +``` +akash query provider service-logs --provider --dseq +``` + +Visit the Lightstreamer service in your browser using the assigned URL: +``` +http://:8080 +``` + +--- + +## **Customizing Lightstreamer** +- To customize configurations (e.g., log files or server settings), mount your configuration files into the container. Update the `SDL` file to include a volume mount: + ``` + services: + lightstreamer: + image: lightstreamer + env: + - LS_LOGGER_CONF=/custom/path/log_conf.xml + expose: + - port: 8080 + as: 8080 + to: + - global: true + volumes: + - /local/path/to/config:/lightstreamer/conf + ``` + +--- + +### **Useful Commands** +- **Stop a Deployment**: + ``` + akash tx deployment close --dseq --from + ``` +- **Fetch Logs**: + ``` + akash query provider service-logs --provider --dseq + ``` + +--- + +By following these steps, you can successfully deploy Lightstreamer on the Akash network using its official Docker image. Adjust the resources and configuration as needed for your specific use case. 
\ No newline at end of file diff --git a/src/content/Docs/guides/tooling/mautic/index.md b/src/content/Docs/guides/tooling/mautic/index.md new file mode 100644 index 00000000..4dea4339 --- /dev/null +++ b/src/content/Docs/guides/tooling/mautic/index.md @@ -0,0 +1,141 @@ +--- +categories: ["Guides"] +tags: ["Marketing Automation"] +weight: 1 +title: "Mautic" +linkTitle: "Mautic" +--- + +To deploy **Mautic** (an open-source marketing automation platform) using the `mautic/mautic` Docker image on the **Akash Network**, you'll need to define an **SDL (Stack Definition Language)** file, configure your environment, and deploy the app. Below is a step-by-step guide: + +--- + +### **1. Requirements** +- Install the Akash CLI on your system ([Guide](https://docs.akash.network/cli/install/)). +- Fund your Akash wallet with AKT tokens to pay for deployment costs. +- A domain name or Akash's automatically generated domain for accessing Mautic. +- Basic knowledge of Docker and Akash SDL configuration. + +--- + +### **2. Create the SDL File** +The SDL file defines the deployment configuration for Mautic on Akash. Below is a sample `mautic.yml` file: + +``` +--- +version: "2.0" + +services: + mautic: + image: mautic/mautic:latest + env: + - MAUTIC_DB_HOST=mautic-db + - MAUTIC_DB_USER=mautic + - MAUTIC_DB_PASSWORD=mautic_password + - MAUTIC_DB_NAME=mautic + expose: + - port: 80 + as: 80 + to: + - global + depends_on: + - mautic-db + + mautic-db: + image: mariadb:latest + env: + - MYSQL_ROOT_PASSWORD=root_password + - MYSQL_DATABASE=mautic + - MYSQL_USER=mautic + - MYSQL_PASSWORD=mautic_password + expose: + - port: 3306 + as: 3306 + to: + - mautic + +profiles: + compute: + mautic-profile: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + size: 5Gi + + placement: + akash: + attributes: + region: us-west # Choose a region + pricing: + mautic-profile: + denom: uakt + amount: 100 + +deployment: + mautic: + profile: mautic-profile + count: 1 +``` + +--- + +### **3. 
Key Configuration Notes** +- **Services:** + - `mautic`: The Mautic container using the `mautic/mautic` image. + - `mautic-db`: A MariaDB container to serve as the database backend. +- **Environment Variables:** + - Adjust the `MYSQL_` and `MAUTIC_DB_` environment variables according to your needs. +- **Storage:** + - The `storage` size for both `mautic` and `mautic-db` should be sufficient to handle your data needs. +- **Pricing:** + - Modify the `amount` under `pricing` to match your budget. + +--- + +### **4. Deploy Mautic on Akash** +1. **Initialize Deployment**: + Save the `mautic.yml` file and deploy it with Akash CLI: + ``` + akash tx deployment create mautic.yml --from + ``` + +2. **Bid and Lease**: + After submitting the deployment, watch for bids and create a lease: + ``` + akash query market bid list --owner + akash tx market lease create --dseq --from + ``` + +3. **Access the Deployment**: + Use Akash's automatically generated domain or map your custom domain using a CNAME record. + +--- + +### **5. Post-Deployment** +- Visit the Mautic URL (e.g., `http://`) and complete the setup wizard. +- Enter the database details matching your MariaDB container environment variables: + - Host: `mautic-db` + - Database: `mautic` + - Username: `mautic` + - Password: `mautic_password` + +--- + +### **6. Customization (Optional)** +- To use SSL, set up a reverse proxy like **NGINX** or use services like **Cloudflare** for HTTPS. +- Scale resources by updating the `compute` section in the SDL file. + +--- + +### **7. Updating Mautic** +If you need to update the Mautic version, modify the image in the SDL file and redeploy: +``` +image: mautic/mautic: +``` + +--- + +By following these steps, you can successfully deploy and manage **Mautic** on the Akash Network. 
\ No newline at end of file diff --git a/src/content/Docs/guides/tooling/nats/index.md b/src/content/Docs/guides/tooling/nats/index.md new file mode 100644 index 00000000..4f330e2e --- /dev/null +++ b/src/content/Docs/guides/tooling/nats/index.md @@ -0,0 +1,176 @@ +--- +categories: ["Guides"] +tags: ["Messaging"] +weight: 1 +title: "NATS" +linkTitle: "NATS" +--- + + +NATS is a lightweight, high-performance, open-source messaging system for distributed systems. Deploying it on Akash provides cost-effective, decentralized hosting for your messaging infrastructure. + +--- + +## **Prerequisites** +1. **Akash Account**: Ensure you have an Akash wallet and some AKT tokens for deployment. +2. **Akash CLI**: Installed and configured. Refer to [Akash CLI setup](https://docs.akash.network/guides/cli). +3. **Dockerized NATS Image**: We'll use the official NATS Docker image (`nats:latest`). +4. **Domain/Access**: Optional but recommended to configure DNS for accessing your NATS instance. + +--- + +## **Step 1: Write the SDL File** + +Create an SDL file (e.g., `deploy.yaml`) that defines your NATS deployment: + +``` +--- +version: "2.0" + +services: + nats: + image: nats:latest + env: + - "NATS_SERVER_NAME=nats-server" + expose: + - port: 4222 + as: 4222 + to: + - global + - port: 6222 + as: 6222 + to: + - global + - port: 8222 + as: 8222 + to: + - global + +profiles: + compute: + nats: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + akash: + attributes: + host: akash + signedBy: + anyOf: + - akash + pricing: + nats: + denom: uakt + amount: 1000 + +deployment: + nats: + akash: + profile: nats + count: 1 +``` + +**Explanation:** +- **Services**: Defines the NATS container using the official Docker image. +- **Exposed Ports**: + - `4222`: Client connections. + - `6222`: Cluster connections (if scaling). + - `8222`: Monitoring. +- **Profiles**: + - `compute`: Allocates resources (CPU, memory, storage) for the NATS server. 
+ - `placement`: Specifies deployment attributes and pricing. + +--- + +## **Step 2: Deploy the SDL** + +### 1. **Create a Deployment** +Run the following command to create a deployment on Akash: + +```bash +akash tx deployment create deploy.yaml --from --chain-id --node --fees 5000uakt -y +``` + +Replace: +- ``: Your Akash wallet name. +- ``: Akash chain ID (e.g., `akashnet-2`). +- ``: The Akash RPC node (e.g., `https://rpc.akash.network:443`). + +### 2. **Wait for Deployment Approval** +Use this command to check the status of your deployment: + +```bash +akash query deployment list --owner +``` + +Once approved, you’ll get the `lease_id`. + +--- + +## **Step 3: Validate the Lease** + +After the provider accepts the deployment, create a lease using: + +```bash +akash tx market lease create --dseq --from --chain-id --node --fees 5000uakt -y +``` + +--- + +## **Step 4: Access the NATS Server** + +### 1. **Find the Provider Endpoint** +Run the following command to retrieve the endpoint of your deployment: + +```bash +akash query market lease get --owner --dseq --gseq 1 --oseq 1 --provider +``` + +### 2. **Connect to NATS** +Use the returned endpoint to connect your clients to the NATS server using the appropriate port (e.g., `4222`). + +For example: +```bash +nats-server -connect :4222 +``` + +--- + +## **Optional: Configure DNS** +If you want a user-friendly domain name, map the provider's IP to your domain using a DNS record. This step enhances usability for production deployments. + +--- + +## **Step 5: Monitor the NATS Server** + +The NATS monitoring dashboard is exposed on port `8222`. 
Access it by navigating to: +``` +http://<provider-endpoint>:8222 +``` + +--- + +## **Step 6: Manage Your Deployment** + +### **To Update Deployment**: +Modify the SDL file and use: +```bash +akash tx deployment update deploy.yaml --from <wallet-name> --chain-id <chain-id> --node <node-address> --fees 5000uakt -y +``` + +### **To Close Deployment**: +Terminate the deployment when it’s no longer needed: +```bash +akash tx deployment close --dseq <dseq> --from <wallet-name> --chain-id <chain-id> --node <node-address> --fees 5000uakt -y +``` + +--- + +## **Conclusion** +This guide walks you through deploying NATS on Akash Network, ensuring cost-efficiency, scalability, and decentralization for your messaging infrastructure. Adjust the SDL file as needed for your use case, such as scaling resources or configuring security. \ No newline at end of file diff --git a/src/content/Docs/guides/tooling/pulsar/index.md b/src/content/Docs/guides/tooling/pulsar/index.md new file mode 100644 index 00000000..ba68d6d0 --- /dev/null +++ b/src/content/Docs/guides/tooling/pulsar/index.md @@ -0,0 +1,113 @@ +--- +categories: ["Guides"] +tags: ["Messaging", "Streaming"] +weight: 1 +title: "Apache Pulsar" +linkTitle: "Apache Pulsar" +--- + +Here’s a guide to deploying **Apache Pulsar** with all the official connectors and offloaders using the `apachepulsar/pulsar-all` image. + +## Steps to Deploy Apache Pulsar on Akash Network + +1. **Prepare Environment** + - Install Akash CLI or use an existing deployment manager for the Akash network. + - Ensure you have the necessary funds in your Akash wallet to deploy. + +2. 
**Write the Deployment YAML** + +Below is the `deploy.yaml` configuration: + +``` +--- +version: "2.0" +services: + pulsar: + image: apachepulsar/pulsar-all:latest + expose: + - port: 6650 # Pulsar Broker Port + as: 6650 + to: + - global: true + - port: 8080 # Pulsar Admin API Port + as: 80 + to: + - global: true + env: + - PULSAR_MEM: "-Xms2g -Xmx2g -XX:MaxDirectMemorySize=4g" + - PULSAR_STANDALONE_CONF: "/pulsar/conf/standalone.conf" + - PULSAR_PREFIX_clusterName: "standalone" + args: + - bin/pulsar + - standalone + - "--no-functions-worker" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/admin/v2/clusters"] + interval: 30s + timeout: 5s + retries: 3 + +profiles: + compute: + pulsar: + resources: + cpu: + units: "2.0" + memory: + size: "4Gi" + storage: + - size: "20Gi" + + placement: + global: + pricing: + pulsar: + denom: uakt + amount: 500 + +deployment: + pulsar: + global: + profile: pulsar + count: 1 +``` + +## Explanation of the Deployment Configuration + +- **Image**: Uses `apachepulsar/pulsar-all:latest`, which includes all official connectors and offloaders. +- **Ports**: + - 6650: The Pulsar broker port for client connections. + - 8080: The Pulsar Admin REST API for managing the Pulsar cluster. +- **Environment Variables**: + - Configures JVM memory and standalone Pulsar setup. +- **Health Check**: + - Ensures that the Pulsar Admin API is accessible to verify the service is running correctly. +- **Resource Allocation**: + - Assigns 2 CPU cores, 4GB memory, and 20GB storage to the service. + +## Deployment Instructions + +1. Save the YAML file as `deploy.yaml`. +2. Deploy it using the Akash CLI: + ``` + akash tx deployment create deploy.yaml --from --chain-id --node + ``` +3. Monitor the deployment: + ``` + akash query deployment list --owner + ``` + +## Verifying the Deployment + +1. Access the Pulsar Admin API: + ``` + curl http://:80/admin/v2/clusters + ``` + Replace `` with the URL provided by Akash for your deployment. + +2. 
Connect a Pulsar client to the broker: + ``` + pulsar-client produce my-topic --messages "Hello, Pulsar!" + ``` + +This setup deploys Apache Pulsar with all connectors and offloaders, ready for production use. \ No newline at end of file diff --git a/src/content/Docs/guides/tooling/rabbitmq/index.md b/src/content/Docs/guides/tooling/rabbitmq/index.md new file mode 100644 index 00000000..263603ed --- /dev/null +++ b/src/content/Docs/guides/tooling/rabbitmq/index.md @@ -0,0 +1,168 @@ +--- +categories: ["Guides"] +tags: ["Messaging", "Streaming"] +weight: 1 +title: "Guide to Deploy RocketMQ on Akash " +linkTitle: "RabbitMQ" +--- + + +This guide will help you deploy Apache RocketMQ on Akash, leveraging the official Docker image. RocketMQ is a distributed messaging and streaming platform. The steps include creating an SDL file for Akash, setting up the RocketMQ broker and nameserver, and deploying them on Akash. + +--- + +### **Prerequisites** +1. **Akash CLI Installed**: Make sure you have the Akash CLI installed and configured. +2. **Docker Image**: Use the official RocketMQ Docker image: `apache/rocketmq:latest`. +3. **Akash Account**: Ensure you have AKT tokens and are ready to deploy. +4. **Ports Used**: + - Nameserver: `9876` + - Broker: `10911`, `10912` (communication) and `10909` (Web access) + +--- + +## **Step 1: Prepare the Akash SDL File** + +The SDL (Stack Definition Language) file describes your deployment. 
Here’s an example `rocketmq-deployment.yml` for deploying RocketMQ's nameserver and broker: + +```yaml +version: "2.0" + +services: + rocketmq-nameserver: + image: apache/rocketmq:latest + args: ["sh", "-c", "mqnamesrv"] + env: + - JAVA_OPT=-Duser.timezone=UTC + expose: + - port: 9876 + as: 9876 + to: + - global: true + + rocketmq-broker: + image: apache/rocketmq:latest + args: ["sh", "-c", "mqbroker -n rocketmq-nameserver:9876"] + env: + - JAVA_OPT=-Duser.timezone=UTC + - BROKER_ID=0 + - BROKER_NAME=broker-a + - NAMESRV_ADDR=rocketmq-nameserver:9876 + expose: + - port: 10909 + as: 10909 + to: + - global: true + - port: 10911 + as: 10911 + to: + - global: true + - port: 10912 + as: 10912 + to: + - global: true + +profiles: + compute: + rocketmq-nameserver: + resources: + cpu: + units: 500m + memory: + size: 512Mi + storage: + size: 1Gi + rocketmq-broker: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + size: 2Gi + +deployment: + rocketmq: + rocketmq-nameserver: + profile: rocketmq-nameserver + count: 1 + rocketmq-broker: + profile: rocketmq-broker + count: 1 +``` + +--- + +## **Step 2: Validate the SDL File** + +Run the following command to validate the SDL file syntax: + +```bash +akash validate --manifest rocketmq-deployment.yml +``` + +If there are errors, correct them before proceeding. + +--- + +## **Step 3: Deploy RocketMQ on Akash** + +1. **Create the Deployment:** + + Submit the deployment to Akash: + + ```bash + akash tx deployment create rocketmq-deployment.yml --from --gas auto --gas-prices 0.025uakt + ``` + +2. **Check for Bids:** + + Monitor bids from providers using: + + ```bash + akash query market bid list --owner + ``` + +3. **Accept a Bid:** + + Once you find a suitable bid, accept it: + + ```bash + akash tx market lease create --owner --provider --dseq --from + ``` + +--- + +## **Step 4: Access RocketMQ** + +Once deployed, you can access RocketMQ’s services globally using the assigned domain or IP and the following ports: + +1. 
**Nameserver**: `<deployment-address>:9876` +2. **Broker Web UI**: `<deployment-address>:10909` +3. **Broker Communication**: Ports `10911` and `10912`. + +--- + +## **Step 5: Test RocketMQ** + +1. **Install the RocketMQ Client** on your local machine or server. +2. Configure the `NAMESRV_ADDR` to point to the Akash deployment: `<deployment-address>:9876`. +3. Use RocketMQ’s CLI or SDKs to produce and consume messages to ensure everything is working. + +--- + +## **Step 6: Monitor and Manage RocketMQ** + +- Check logs by accessing the container logs using Akash CLI. +- Update the deployment if needed by modifying the SDL and resubmitting the deployment. + +--- + +## **Optional Enhancements** +- **Persistent Storage**: Add persistent storage for message logs. +- **Scaling**: Use Akash profiles to scale the broker or nameserver. +- **Secure Access**: Add reverse proxy or VPN to secure RocketMQ services. + +--- + +This guide provides a straightforward method to deploy RocketMQ on Akash using the official Docker image. \ No newline at end of file diff --git a/src/content/Docs/guides/tooling/storm/index.md b/src/content/Docs/guides/tooling/storm/index.md new file mode 100644 index 00000000..1e530a70 --- /dev/null +++ b/src/content/Docs/guides/tooling/storm/index.md @@ -0,0 +1,123 @@ +--- +categories: ["Guides"] +tags: ["ETL"] +weight: 1 +title: "Apache Storm" +linkTitle: "Apache Storm" +--- + +Apache Storm is a distributed, real-time computation system designed for processing large streams of data with high throughput and low latency. It is widely used for tasks like real-time analytics, distributed processing, ETL (Extract, Transform, Load) pipelines, and machine learning pipelines. Apache Storm integrates seamlessly with other systems, supports a wide range of programming languages, and provides guaranteed data processing. 
+ +## **Deploying Apache Storm on Akash using the `storm:latest` Image** + +Akash is a decentralized cloud computing platform that allows users to deploy containerized applications at a fraction of the cost of traditional cloud providers. Below is a step-by-step guide to deploying Apache Storm using the `storm:latest` Docker image. + +--- + +## **Step 1: Prerequisites** + +1. **Install Akash CLI**: Ensure you have the Akash Command Line Interface (CLI) installed. You can follow [Akash's documentation](docs/deployments/akash-cli/overview/) to install and set up your CLI. +2. **Create a Wallet**: Create and fund an Akash wallet with the required AKT tokens to pay for the deployment. +3. **Install Docker**: Make sure Docker is installed to verify the `storm:latest` image locally if needed. +4. **Create an SDL Template**: Akash deployments are configured using an SDL file (Service Definition Language). + +--- + +## **Step 2: Create the SDL File** + +Below is an example of an SDL file to deploy the `storm:latest` container on Akash: + +``` +version: "2.0" + +services: + apache-storm: + image: storm:latest + expose: + - port: 8080 + as: 80 + to: + - global + - port: 6627 + as: 6627 + to: + - global + env: + - STORM_LOG_DIR=/var/log/storm + - STORM_HOME=/usr/share/storm + command: + - "bin/storm" + - "nimbus" + +profiles: + compute: + apache-storm: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + size: 1Gi + + placement: + west-coast: + attributes: + region: us-west + pricing: + apache-storm: + denom: uakt + amount: 100 + +deployment: + apache-storm: + west-coast: + profile: apache-storm + count: 1 +``` + +--- + +## **Step 3: Submit the Deployment** + +1. Save the SDL file as `storm-deployment.yaml`. +2. Use the Akash CLI to create and submit the deployment: + ``` + akash tx deployment create storm-deployment.yaml --from + ``` +3. Wait for the deployment to be accepted and lease to be created. 
+ +--- + +## **Step 4: Access Apache Storm** + +Once the deployment is live: +1. Access the **Nimbus UI** (Apache Storm's web interface) at the exposed port (e.g., `http://<provider-ip>:80`). +2. For clients or workers to connect, use the Nimbus RPC port (6627) to submit and manage topologies. + +--- + +## **Step 5: Verify Deployment** + +1. Use the Akash CLI to get the lease status: + ``` + akash query deployment get --owner <owner-address> --dseq <dseq> + ``` +2. Confirm that the service is running and accessible via the assigned IP address. + +--- + +## **Optional Configurations** + +- **Scaling**: To scale the deployment (e.g., adding workers), modify the `count` in the `deployment` section of the SDL file. +- **Persistent Storage**: Add a volume to persist logs or data. +- **Custom Configuration**: Use environment variables to pass additional configurations to Storm. + +--- + +## **Monitoring and Maintenance** + +- Monitor logs using the `docker logs` equivalent in Akash for debugging. +- Regularly check resource utilization to ensure optimal performance. + +By following this guide, you can successfully deploy and run Apache Storm on the Akash network, leveraging its decentralized compute infrastructure for cost-effective and scalable real-time data processing. \ No newline at end of file diff --git a/src/content/Docs/guides/tooling/terraform/index.md b/src/content/Docs/guides/tooling/terraform/index.md new file mode 100644 index 00000000..78306759 --- /dev/null +++ b/src/content/Docs/guides/tooling/terraform/index.md @@ -0,0 +1,150 @@ +--- +categories: ["Guides"] +tags: ["Infrastructure-as-Code"] +weight: 1 +title: "Terraform" +linkTitle: "Terraform" +--- + +## **1. Prerequisites** +Before deploying, ensure the following: +- **Akash CLI** is installed and configured. +- You have an Akash wallet funded with sufficient AKT tokens. +- Basic knowledge of writing SDL files for Akash deployments. + +--- + +## **2. 
Create the Deployment SDL File** +Here's an example SDL file for deploying the `hashicorp/terraform` Docker image on Akash: + +``` +version: "2.0" + +services: + terraform: + image: hashicorp/terraform:latest + command: ["sh", "-c", "tail -f /dev/null"] # Keeps the container running + expose: + - port: 8080 + as: 80 + to: + - global: true + +profiles: + compute: + terraform: + resources: + cpu: + units: 1 + memory: + size: 512Mi + storage: + size: 1Gi + + placement: + terraform: + attributes: + region: us-west # Change region as needed + pricing: + terraform: + denom: uakt + amount: 100 # Cost per block (adjust as needed) + +deployment: + terraform: + terraform: + profile: terraform + count: 1 +``` + +This SDL file: +- Uses the `hashicorp/terraform` Docker image. +- Exposes port 80 globally for potential HTTP-based services. +- Allocates 1 CPU unit, 512 Mi of memory, and 1 Gi of storage. + +--- + +## **3. Deploy the SDL File on Akash** +1. **Validate the SDL file**: + Run the following command to ensure the SDL file is valid: + ``` + akash deployment validate .yaml + ``` + +2. **Create the Deployment**: + Use the Akash CLI to create the deployment: + ``` + akash tx deployment create .yaml --from --node + ``` + +3. **View the Deployment Status**: + Check the deployment status: + ``` + akash query deployment list --owner + ``` + +4. **Accept a Bid**: + Once a provider makes a bid, accept it: + ``` + akash tx market lease create --dseq --from + ``` + +5. **Get the Lease Information**: + After accepting the bid, retrieve the lease details: + ``` + akash query market lease list --owner + ``` + +--- + +## **4. Access the Terraform Container** +Once the deployment is live, you can access the `terraform` service. + +1. **Retrieve Service Endpoint**: + Get the service's public IP/endpoint: + ``` + akash query market lease status --dseq + ``` + +2. 
**SSH or Use Akash Logs**: + - If the container is running, you can connect using `kubectl exec` if you have a setup to manage pods. + - Alternatively, tail logs: + ``` + akash query deployment logs --dseq --from + ``` + +--- + +## **5. Using Terraform in the Container** +To run Terraform commands inside the container: +1. **Use Akash's interactive shell** (if supported): + ``` + akash exec run + ``` +2. Inside the container, initialize Terraform: + ``` + terraform init + ``` +3. Apply a configuration: + ``` + terraform apply + ``` + Mount your configuration files using volume mounts, or copy the configuration into the container via interactive shell commands. + +--- + +## **6. Monitoring and Managing the Deployment** +- Use `akash query deployment` commands to monitor deployment health and logs. +- Scale or update resources by modifying and re-deploying the SDL. + +--- + +## **7. Terminate the Deployment** +When the deployment is no longer needed, close it to stop incurring costs: +``` +akash tx deployment close --dseq --from +``` + +--- + +By following this guide, you can deploy and manage Terraform containers on Akash. Adjust the SDL as necessary for your specific Terraform usage scenario. \ No newline at end of file diff --git a/src/content/Docs/guides/tooling/traefik/index.md b/src/content/Docs/guides/tooling/traefik/index.md new file mode 100644 index 00000000..9918c9f6 --- /dev/null +++ b/src/content/Docs/guides/tooling/traefik/index.md @@ -0,0 +1,134 @@ +--- +categories: ["Guides"] +tags: ["Proxy", "API"] +weight: 1 +title: "Traefik" +linkTitle: "Traefik" +--- + +### **Quick Overview of Traefik** + +**Traefik** is a modern reverse proxy and load balancer designed to handle dynamic environments, such as containerized applications. It's popular for its simplicity and features like automatic SSL, seamless integration with container orchestrators (Docker, Kubernetes, etc.), and dynamic configuration. 
Traefik is often used as an edge router for microservices, acting as the gateway between external requests and internal services. + +--- + +### **Guide to Deploy Traefik on Akash** + +This guide covers deploying Traefik on the **Akash Network**, a decentralized cloud platform. The `traefik` Docker image will be used, and we will configure it to expose services dynamically. + +--- + +#### **Prerequisites** + +1. **Akash CLI**: Installed and configured on your system. +2. **Akash Account**: Funded with AKT tokens. +3. **Docker Compose YAML or equivalent**: A basic setup to test Traefik routing. +4. **Domain**: (Optional) If you want to configure automatic SSL. + +--- + +#### **Steps to Deploy Traefik** + +##### **1. Define the Deployment (SDL File)** + +Create a file named `deploy.yaml`. Below is an example SDL template for deploying Traefik: + +``` +version: "2.0" + +services: + traefik: + image: traefik + expose: + - port: 80 + to: + - global: true + - port: 443 + to: + - global: true + env: + - TRAEFIK_PROVIDERS_DOCKER=true + - TRAEFIK_API_INSECURE=true + - TRAEFIK_ENTRYPOINTS_WEB_ADDRESS=:80 + - TRAEFIK_ENTRYPOINTS_WEBSECURE_ADDRESS=:443 + args: + - "--entrypoints.web.address=:80" + - "--entrypoints.websecure.address=:443" + - "--providers.docker" + - "--api.insecure=true" + - "--certificatesresolvers.myresolver.acme.tlschallenge=true" + - "--certificatesresolvers.myresolver.acme.email=" + - "--certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json" + volumes: + - size: 2Gi + mount: /letsencrypt + +profiles: + compute: + traefik: + resources: + cpu: + units: 0.5 + memory: + size: 512Mi + storage: + size: 2Gi + +deployment: + traefik: + traefik: + profile: traefik + count: 1 +``` + +--- + +##### **2. Deploy on Akash** + +1. **Initialize Deployment**: + Run the following command to create a deployment: + ``` + akash tx deployment create deploy.yaml --from --node https://rpc.akashnet.io --chain-id akashnet-2 + ``` + +2. 
**View Lease Information**: + After successful deployment, query the lease to get the deployment's details: + ``` + akash query deployment list --owner --state active + ``` + +3. **Expose Traefik's Public Endpoint**: + Use the lease information to query the provider and fetch the public endpoint (IP or domain): + ``` + akash provider lease-status --node --chain-id akashnet-2 + ``` + +--- + +##### **3. Test the Deployment** + +1. Open the public endpoint in a browser or use `curl`: + ``` + curl http:// + ``` + +2. If you enabled the insecure API, you can visit the Traefik dashboard at: + ``` + http://:8080/dashboard/ + ``` + +--- + +##### **4. Add Services to Route Through Traefik** + +1. Ensure your services are exposed to the Traefik instance, and they publish metadata that Traefik can read. This can be done with labels (if using Docker) or through additional configurations. + +2. Update the `traefik` container’s configuration to recognize your services using its provider configuration. + +--- + +#### **Customizations** + +- **SSL/TLS**: Add your email and domain in the `certificatesresolvers.myresolver.acme` fields to configure Let's Encrypt for HTTPS. +- **Scaling**: Update the `count` in the `deployment` section to scale the number of Traefik instances. +