Commit da71ba5: Merge branch 'stackblitz-labs:main' into main

jerryokk authored Jan 6, 2025 · 2 parents 763efb8 + 6f524fd
Showing 16 changed files with 1,278 additions and 15 deletions.
11 changes: 11 additions & 0 deletions .env.example
@@ -83,6 +83,17 @@ XAI_API_KEY=
# You only need this environment variable set if you want to use Perplexity models
PERPLEXITY_API_KEY=

# Get your AWS configuration
# https://console.aws.amazon.com/iam/home
# The JSON should include the following keys:
# - region: The AWS region where Bedrock is available.
# - accessKeyId: Your AWS access key ID.
# - secretAccessKey: Your AWS secret access key.
# - sessionToken (optional): Temporary session token if using an IAM role or temporary credentials.
# Example JSON:
# {"region": "us-east-1", "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey", "sessionToken": "yourSessionToken"}
AWS_BEDROCK_CONFIG=

# Include this environment variable if you want more logging for debugging locally
VITE_LOG_LEVEL=debug

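For reference, AWS_BEDROCK_CONFIG is a single JSON string. A minimal TypeScript sketch of assembling it; the serializeBedrockConfig helper is illustrative and not part of this commit:

// Illustrative helper: build the AWS_BEDROCK_CONFIG JSON string from
// individual credential fields, matching the keys documented above.
interface BedrockConfig {
  region: string;
  accessKeyId: string;
  secretAccessKey: string;
  sessionToken?: string; // optional, for temporary credentials
}

function serializeBedrockConfig(config: BedrockConfig): string {
  // JSON.stringify omits undefined fields, so sessionToken disappears
  // automatically when it is not set.
  return JSON.stringify(config);
}

// Example output, suitable as the value of AWS_BEDROCK_CONFIG:
// {"region":"us-east-1","accessKeyId":"yourAccessKeyId","secretAccessKey":"yourSecretAccessKey"}
serializeBedrockConfig({
  region: 'us-east-1',
  accessKeyId: 'yourAccessKeyId',
  secretAccessKey: 'yourSecretAccessKey',
});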
2 changes: 2 additions & 0 deletions .github/workflows/docker.yaml
@@ -12,6 +12,7 @@ on:

permissions:
packages: write
contents: read

env:
REGISTRY: ghcr.io
@@ -64,6 +65,7 @@ jobs:
context: .
file: ./Dockerfile
target: ${{ env.BUILD_TARGET }}
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
11 changes: 8 additions & 3 deletions Dockerfile
@@ -28,6 +28,7 @@ ARG OLLAMA_API_BASE_URL
ARG XAI_API_KEY

Check warning on line 28 in Dockerfile (GitHub Actions / docker-build-publish). SecretsUsedInArgOrEnv: Do not use ARG or ENV instructions for sensitive data (ARG "XAI_API_KEY"). More info: https://docs.docker.com/go/dockerfile/rule/secrets-used-in-arg-or-env/
ARG TOGETHER_API_KEY

Check warning on line 29 in Dockerfile (GitHub Actions / docker-build-publish). SecretsUsedInArgOrEnv: Do not use ARG or ENV instructions for sensitive data (ARG "TOGETHER_API_KEY"). More info: https://docs.docker.com/go/dockerfile/rule/secrets-used-in-arg-or-env/
ARG TOGETHER_API_BASE_URL
ARG AWS_BEDROCK_CONFIG
ARG VITE_LOG_LEVEL=debug
ARG DEFAULT_NUM_CTX

@@ -42,14 +43,16 @@ ENV WRANGLER_SEND_METRICS=false \
XAI_API_KEY=${XAI_API_KEY} \
TOGETHER_API_KEY=${TOGETHER_API_KEY} \
TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
RUNNING_IN_DOCKER=true

# Pre-configure wrangler to disable metrics
RUN mkdir -p /root/.config/.wrangler && \
echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json

RUN npm run build
RUN pnpm run build

CMD [ "pnpm", "run", "dockerstart"]

@@ -80,8 +83,10 @@ ENV GROQ_API_KEY=${GROQ_API_KEY} \
XAI_API_KEY=${XAI_API_KEY} \
TOGETHER_API_KEY=${TOGETHER_API_KEY} \
TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
RUNNING_IN_DOCKER=true

RUN mkdir -p ${WORKDIR}/run
CMD pnpm run dev --host
1 change: 1 addition & 0 deletions README.md
@@ -74,6 +74,7 @@ bolt.diy was originally started by [Cole Medin](https://www.youtube.com/@ColeMedin)
- ⬜ Voice prompting
- ⬜ Azure Open AI API Integration
- ✅ Perplexity Integration (@meetpateltech)
- ✅ AWS Bedrock Integration (@kunjabijukchhe)
- ⬜ Vertex AI Integration

## Features
1 change: 1 addition & 0 deletions app/components/settings/data/DataTab.tsx
@@ -22,6 +22,7 @@ const API_KEY_PROVIDERS = [
'Perplexity',
'Cohere',
'AzureOpenAI',
'AmazonBedrock',
] as const;

interface ApiKeys {
113 changes: 113 additions & 0 deletions app/lib/modules/llm/providers/amazon-bedrock.ts
@@ -0,0 +1,113 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { LanguageModelV1 } from 'ai';
import type { IProviderSetting } from '~/types/model';
import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';

interface AWSBedRockConfig {
region: string;
accessKeyId: string;
secretAccessKey: string;
sessionToken?: string;
}

export default class AmazonBedrockProvider extends BaseProvider {
name = 'AmazonBedrock';
getApiKeyLink = 'https://console.aws.amazon.com/iam/home';

config = {
apiTokenKey: 'AWS_BEDROCK_CONFIG',
};

staticModels: ModelInfo[] = [
{
name: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
label: 'Claude 3.5 Sonnet (Bedrock)',
provider: 'AmazonBedrock',
maxTokenAllowed: 4096,
},
{
name: 'anthropic.claude-3-sonnet-20240229-v1:0',
label: 'Claude 3 Sonnet (Bedrock)',
provider: 'AmazonBedrock',
maxTokenAllowed: 4096,
},
{
name: 'anthropic.claude-3-haiku-20240307-v1:0',
label: 'Claude 3 Haiku (Bedrock)',
provider: 'AmazonBedrock',
maxTokenAllowed: 4096,
},
{
name: 'amazon.nova-pro-v1:0',
label: 'Amazon Nova Pro (Bedrock)',
provider: 'AmazonBedrock',
maxTokenAllowed: 5120,
},
{
name: 'amazon.nova-lite-v1:0',
label: 'Amazon Nova Lite (Bedrock)',
provider: 'AmazonBedrock',
maxTokenAllowed: 5120,
},
{
name: 'mistral.mistral-large-2402-v1:0',
label: 'Mistral Large 24.02 (Bedrock)',
provider: 'AmazonBedrock',
maxTokenAllowed: 8192,
},
];

private _parseAndValidateConfig(apiKey: string): AWSBedRockConfig {
let parsedConfig: AWSBedRockConfig;

try {
parsedConfig = JSON.parse(apiKey);
} catch {
throw new Error(
'Invalid AWS Bedrock configuration format. Please provide a valid JSON string containing region, accessKeyId, and secretAccessKey.',
);
}

const { region, accessKeyId, secretAccessKey, sessionToken } = parsedConfig;

if (!region || !accessKeyId || !secretAccessKey) {
throw new Error(
'Missing required AWS credentials. Configuration must include region, accessKeyId, and secretAccessKey.',
);
}

return {
region,
accessKeyId,
secretAccessKey,
...(sessionToken && { sessionToken }),
};
}

getModelInstance(options: {
model: string;
serverEnv: any;
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
}): LanguageModelV1 {
const { model, serverEnv, apiKeys, providerSettings } = options;

const { apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'AWS_BEDROCK_CONFIG',
});

if (!apiKey) {
throw new Error(`Missing API key for ${this.name} provider`);
}

const config = this._parseAndValidateConfig(apiKey);
const bedrock = createAmazonBedrock(config);

return bedrock(model);
}
}
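A quick usage sketch for the new provider. This is illustrative only: it assumes the AWS_BEDROCK_CONFIG JSON from .env.example is supplied via serverEnv, which getProviderBaseUrlAndKey reads through the defaultApiTokenKey shown above.

// Illustrative usage of the new provider; not part of this commit.
import AmazonBedrockProvider from '~/lib/modules/llm/providers/amazon-bedrock';

const provider = new AmazonBedrockProvider();

// getModelInstance parses AWS_BEDROCK_CONFIG, validates the credentials,
// and returns a LanguageModelV1 usable with streamText/generateText
// from the 'ai' package, as elsewhere in the app.
const model = provider.getModelInstance({
  model: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
  serverEnv: {
    AWS_BEDROCK_CONFIG:
      '{"region":"us-east-1","accessKeyId":"yourAccessKeyId","secretAccessKey":"yourSecretAccessKey"}',
  },
});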
35 changes: 30 additions & 5 deletions app/lib/modules/llm/providers/lmstudio.ts
@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import { createOpenAI } from '@ai-sdk/openai';
import type { LanguageModelV1 } from 'ai';
import { logger } from '~/utils/logger';

export default class LMStudioProvider extends BaseProvider {
name = 'LMStudio';
@@ -22,7 +23,7 @@ export default class LMStudioProvider extends BaseProvider {
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
const { baseUrl } = this.getProviderBaseUrlAndKey({
let { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
@@ -31,7 +32,18 @@ });
});

if (!baseUrl) {
return [];
throw new Error('No baseUrl found for LMStudio provider');
}

if (typeof window === 'undefined') {
/*
* Running on the server
* Backend: Check if we're running in Docker
*/
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';

baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
}

const response = await fetch(`${baseUrl}/v1/models`);
@@ -51,13 +63,26 @@
providerSettings?: Record<string, IProviderSetting>;
}) => LanguageModelV1 = (options) => {
const { apiKeys, providerSettings, serverEnv, model } = options;
const { baseUrl } = this.getProviderBaseUrlAndKey({
let { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
defaultApiTokenKey: '',
});

if (!baseUrl) {
throw new Error('No baseUrl found for LMStudio provider');
}

if (typeof window === 'undefined') {
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
}

logger.debug('LMStudio Base Url used: ', baseUrl);

const lmstudio = createOpenAI({
baseUrl: `${baseUrl}/v1`,
apiKey: '',
25 changes: 21 additions & 4 deletions app/lib/modules/llm/providers/ollama.ts
@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { ollama } from 'ollama-ai-provider';
import { logger } from '~/utils/logger';

interface OllamaModelDetails {
parent_model: string;
@@ -45,7 +46,7 @@ export default class OllamaProvider extends BaseProvider {
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
const { baseUrl } = this.getProviderBaseUrlAndKey({
let { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
@@ -54,7 +55,18 @@ });
});

if (!baseUrl) {
return [];
throw new Error('No baseUrl found for OLLAMA provider');
}

if (typeof window === 'undefined') {
/*
* Running on the server
* Backend: Check if we're running in Docker
*/
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';

baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
}

const response = await fetch(`${baseUrl}/api/tags`);
@@ -78,18 +90,23 @@
const { apiKeys, providerSettings, serverEnv, model } = options;
let { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings,
providerSettings: providerSettings?.[this.name],
serverEnv: serverEnv as any,
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
defaultApiTokenKey: '',
});

// Backend: Check if we're running in Docker
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
if (!baseUrl) {
throw new Error('No baseUrl found for OLLAMA provider');
}

const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;

logger.debug('Ollama Base Url used: ', baseUrl);

const ollamaInstance = ollama(model, {
numCtx: DEFAULT_NUM_CTX,
}) as LanguageModelV1 & { config: any };
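Note that lmstudio.ts and ollama.ts now carry the same two-line localhost rewrite. A hypothetical shared helper, not part of this commit, that both providers could call instead:

// Hypothetical refactor: centralize the Docker host rewrite duplicated in
// the LMStudio and Ollama providers. When the server runs inside Docker,
// loopback addresses must be rewritten to reach services on the host machine.
function resolveDockerHost(baseUrl: string): string {
  const isDocker = process.env.RUNNING_IN_DOCKER === 'true';

  if (!isDocker) {
    return baseUrl;
  }

  return baseUrl
    .replace('localhost', 'host.docker.internal')
    .replace('127.0.0.1', 'host.docker.internal');
}

// Usage inside either provider:
// baseUrl = resolveDockerHost(baseUrl);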
2 changes: 2 additions & 0 deletions app/lib/modules/llm/registry.ts
@@ -14,6 +14,7 @@ import PerplexityProvider from './providers/perplexity';
import TogetherProvider from './providers/together';
import XAIProvider from './providers/xai';
import HyperbolicProvider from './providers/hyperbolic';
import AmazonBedrockProvider from './providers/amazon-bedrock';

export {
AnthropicProvider,
@@ -32,4 +33,5 @@
XAIProvider,
TogetherProvider,
LMStudioProvider,
AmazonBedrockProvider,
};
3 changes: 2 additions & 1 deletion app/routes/api.chat.ts
@@ -139,7 +139,8 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
return new Response(stream.readable, {
status: 200,
headers: {
contentType: 'text/plain; charset=utf-8',
contentType: 'text/event-stream',
connection: 'keep-alive',
},
});
} catch (error: any) {
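One caveat on this hunk: the Fetch API uses the object keys verbatim as header names, so the contentType key produces a header literally named "contenttype" rather than Content-Type ("connection" is fine, since header-name matching is case-insensitive). A sketch with conventional header names, assuming the intent is a standard server-sent events response; the Cache-Control line is a common SSE addition, not in this commit:

// Sketch of the same response with standard header names. `stream` stands
// in for the readable side of the SwitchableStream used in chatAction above.
function sseResponse(stream: ReadableStream<Uint8Array>): Response {
  return new Response(stream, {
    status: 200,
    headers: {
      'Content-Type': 'text/event-stream', // hyphenated, unlike contentType
      Connection: 'keep-alive',
      'Cache-Control': 'no-cache', // common for SSE; not in this commit
    },
  });
}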
2 changes: 2 additions & 0 deletions docker-compose.yaml
@@ -23,6 +23,7 @@ services:
- XAI_API_KEY=${XAI_API_KEY}
- TOGETHER_API_KEY=${TOGETHER_API_KEY}
- TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL}
- AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG}
- VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
- DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX:-32768}
- RUNNING_IN_DOCKER=true
@@ -54,6 +55,7 @@ services:
- OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
- TOGETHER_API_KEY=${TOGETHER_API_KEY}
- TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL}
- AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG}
- VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
- DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX:-32768}
- RUNNING_IN_DOCKER=true
1 change: 1 addition & 0 deletions package.json
@@ -35,6 +35,7 @@
"@ai-sdk/google": "^0.0.52",
"@ai-sdk/mistral": "^0.0.43",
"@ai-sdk/openai": "^0.0.66",
"@ai-sdk/amazon-bedrock": "1.0.6",
"@codemirror/autocomplete": "^6.18.3",
"@codemirror/commands": "^6.7.1",
"@codemirror/lang-cpp": "^6.0.2",