This is the Python client package for Javelin.
For more information about Javelin, see https://getjavelin.io
Javelin Documentation: https://docs.getjavelin.io
For local development, please replace version = "RELEASE_VERSION"
with a concrete semantic version (for example, version = "v0.1.10")
in pyproject.toml.
Make sure that pyproject.toml
is reverted before committing back to main.
# Install the released package from PyPI
pip install javelin_sdk
# --- Development setup (Windows) ---
# Create virtual environment
python -m venv venv
# Activate virtual environment
venv\Scripts\activate
# Install dependencies
pip install poetry
poetry install
# --- Development setup (macOS / Linux) ---
# Create virtual environment
python -m venv venv
# Activate virtual environment
source venv/bin/activate
# Install dependencies
pip install poetry
poetry install
# --- Build and install the package locally ---
# Uninstall any existing version
pip uninstall javelin_sdk -y
# Build the package
poetry build
# Install the newly built package
pip install dist/javelin_sdk-<version>-py3-none-any.whl
from openai import OpenAI

# Point the standard OpenAI client at a Javelin route endpoint.
javelin_route_url = "https://api.javelin.live/v1/query/your_route"
client = OpenAI(base_url=javelin_route_url, api_key="your_api_key")

# Requests keep the familiar OpenAI chat-completions shape.
conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=conversation,
)
import os

import dotenv
from openai import OpenAI

# Load JAVELIN_API_KEY / OPENAI_API_KEY from a local .env file.
dotenv.load_dotenv()

# Configure a regular Javelin route; Javelin authenticates via x-api-key.
javelin_api_key = os.getenv("JAVELIN_API_KEY")
llm_api_key = os.getenv("OPENAI_API_KEY")
javelin_headers = {
    "x-api-key": javelin_api_key,
}

client = OpenAI(
    # NOTE(review): this snippet targets the dev environment, unlike the
    # other examples which use api.javelin.live — confirm which is intended.
    base_url="https://api-dev.javelin.live/v1/query/<route>",
    # Pass the provider key explicitly; previously llm_api_key was loaded
    # but never used and the client silently relied on the implicit
    # OPENAI_API_KEY environment lookup.
    api_key=llm_api_key,
    default_headers=javelin_headers,
)

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "hello"}
    ],
)
print(response.model_dump_json(indent=2))
from javelin_sdk import JavelinClient, JavelinConfig

# Build the Javelin client from an explicit configuration object.
client = JavelinClient(
    JavelinConfig(
        base_url="https://api.javelin.live",
        javelin_api_key="your_javelin_api_key",
    )
)

# Route every request through a universal endpoint by setting the
# x-javelin-route header (univ_azure, univ_bedrock, univ_gemini, ...).
client.set_headers({
    "Content-Type": "application/json",
    "x-javelin-route": "univ_bedrock",
})

# The request itself stays in the OpenAI chat-completions format;
# pick a model appropriate for the chosen endpoint.
chat_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What are the three primary colors?"},
]
response = client.chat.completions.create(
    messages=chat_messages,
    temperature=0.7,
    max_tokens=150,
    model="amazon.titan-text-express-v1",
)
For more detailed examples and integration patterns, check out:
Javelin provides universal endpoints that allow you to use a consistent interface across different LLM providers. Here are the main patterns: