Documentation Index
Fetch the complete documentation index at: https://snowglobe.so/docs/llms.txt
Use this file to discover all available pages before exploring further.
Guardrails has support for 100+ LLMs through its integration with LiteLLM. This integration allows the Guardrails call API to use the same clean interface that LiteLLM and OpenAI use.
To interact with a model, set the desired LLM API KEY and specify the model with the model property.
OpenAI
Basic usage
# Basic usage: route an OpenAI chat completion through a Guard.
from guardrails import Guard
import os
# LiteLLM reads the provider credential from this environment variable.
os.environ["OPENAI_API_KEY"] = "YOUR_OPEN_AI_API_KEY"
# A bare Guard applies no validators; it simply wraps the LLM call.
guard = Guard()
result = guard(
messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
model="gpt-4o",
)
# validated_output carries the LLM response after guard processing.
print(f"{result.validated_output}")
Streaming
# Streaming: pass stream=True to receive validated chunks as they arrive.
from guardrails import Guard
import os
# LiteLLM reads the provider credential from this environment variable.
os.environ["OPENAI_API_KEY"] = "YOUR_OPEN_AI_API_KEY"
guard = Guard()
stream_chunk_generator = guard(
    messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
    model="gpt-4o",
    stream=True,
)
# Each chunk exposes the validated output produced so far.
for chunk in stream_chunk_generator:
    print(f"{chunk.validated_output}")
# Structured output: constrain the LLM response to a Pydantic schema
# using OpenAI function calling.
from pydantic import BaseModel, Field
from typing import List
from guardrails import Guard
import os

os.environ["OPENAI_API_KEY"] = "YOUR_OPEN_AI_API_KEY"

# Schema the LLM output must conform to.
class Fruit(BaseModel):
    name: str
    color: str

class Basket(BaseModel):
    fruits: List[Fruit]

# for_pydantic wires the schema into the Guard's validation loop.
guard = Guard.for_pydantic(Basket)
result = guard(
    messages=[{"role":"user", "content":"Generate a basket of 5 fruits"}],
    model="gpt-4o",
    # Expose the schema to the model as a function-calling tool.
    tools=guard.json_function_calling_tool([]),
    tool_choice="required",
)
print(f"{result.validated_output}")
Anthropic
Basic usage
# Basic usage: Anthropic models use the same LiteLLM-backed interface.
from guardrails import Guard
import os
# Set the credential before calling the guard, matching the other examples
# (LiteLLM reads it from the environment at call time).
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
guard = Guard()
result = guard(
    messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
    model="claude-3-opus-20240229"
)
print(f"{result.validated_output}")
Streaming
# Streaming with an Anthropic model: identical to OpenAI streaming,
# only the model identifier changes.
from guardrails import Guard
import os
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
guard = Guard()
stream_chunk_generator = guard(
    messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
    model="claude-3-opus-20240229",
    stream=True,
)
# Each chunk exposes the validated output produced so far.
for chunk in stream_chunk_generator:
    print(f"{chunk.validated_output}")
Azure OpenAI
Basic usage
# Basic usage: Azure OpenAI deployments via LiteLLM's "azure/" model prefix.
from guardrails import Guard
import os
# LiteLLM reads these three variables to locate and authenticate
# against the Azure OpenAI deployment.
os.environ["AZURE_API_KEY"] = "" # "my-azure-api-key"
os.environ["AZURE_API_BASE"] = "" # "https://example-endpoint.openai.azure.com"
os.environ["AZURE_API_VERSION"] = "" # "2023-05-15"
guard = Guard()
result = guard(
model="azure/<your_deployment_name>",
messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
)
print(f"{result.validated_output}")
Streaming
# Streaming against an Azure OpenAI deployment.
from guardrails import Guard
import os
# LiteLLM reads these three variables to locate and authenticate
# against the Azure OpenAI deployment.
os.environ["AZURE_API_KEY"] = "" # "my-azure-api-key"
os.environ["AZURE_API_BASE"] = "" # "https://example-endpoint.openai.azure.com"
os.environ["AZURE_API_VERSION"] = "" # "2023-05-15"
guard = Guard()
stream_chunk_generator = guard(
    messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
    model="azure/<your_deployment_name>",
    stream=True
)
# Each chunk exposes the validated output produced so far.
for chunk in stream_chunk_generator:
    print(f"{chunk.validated_output}")
# Structured output with Azure OpenAI: same Pydantic/function-calling
# pattern as the OpenAI example, pointed at an Azure deployment.
from pydantic import BaseModel, Field
from typing import List
from guardrails import Guard
import os

os.environ["AZURE_API_KEY"] = "" # "my-azure-api-key"
os.environ["AZURE_API_BASE"] = "" # "https://example-endpoint.openai.azure.com"
os.environ["AZURE_API_VERSION"] = "" # "2023-05-15"

# Schema the LLM output must conform to.
class Fruit(BaseModel):
    name: str
    color: str

class Basket(BaseModel):
    fruits: List[Fruit]

guard = Guard.for_pydantic(Basket)
result = guard(
    messages=[{"role":"user", "content":"Generate a basket of 5 fruits"}],
    model="azure/<your_deployment_name>",
    # Method name matches the Guard API used in the OpenAI example:
    # json_function_calling_tool (was inconsistently spelled here).
    tools=guard.json_function_calling_tool([]),
    tool_choice="required",
)
print(f"{result.validated_output}")
Gemini
Basic usage
# Basic usage: Google Gemini via LiteLLM's "gemini/" model prefix.
from guardrails import Guard
import os
# LiteLLM reads the Gemini credential from this environment variable.
os.environ['GEMINI_API_KEY'] = ""
guard = Guard()
result = guard(
messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
model="gemini/gemini-pro"
)
print(f"{result.validated_output}")
Streaming
# Streaming with a Gemini model.
from guardrails import Guard
import os
# LiteLLM reads the Gemini credential from this environment variable.
os.environ['GEMINI_API_KEY'] = ""
guard = Guard()
stream_chunk_generator = guard(
    messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
    model="gemini/gemini-pro",
    stream=True
)
# Each chunk exposes the validated output produced so far.
for chunk in stream_chunk_generator:
    print(f"{chunk.validated_output}")
Databricks
Basic usage
# Basic usage: Databricks model serving via LiteLLM's "databricks/" prefix.
from guardrails import Guard
import os
# LiteLLM needs both the key and the workspace base URL.
os.environ["DATABRICKS_API_KEY"] = "" # your databricks key
os.environ["DATABRICKS_API_BASE"] = "" # e.g.: https://abc-123ab12a-1234.cloud.databricks.com
guard = Guard()
result = guard(
messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
model="databricks/databricks-dbrx-instruct",
)
print(f"{result.validated_output}")
Streaming
# Streaming with a Databricks-served model.
from guardrails import Guard
import os
# LiteLLM needs both the key and the workspace base URL.
os.environ["DATABRICKS_API_KEY"] = "" # your databricks key
os.environ["DATABRICKS_API_BASE"] = "" # e.g.: https://abc-123ab12a-1234.cloud.databricks.com
guard = Guard()
stream_chunk_generator = guard(
    messages=[{"role":"user", "content":"How many moons does Jupiter have?"}],
    model="databricks/databricks-dbrx-instruct",
    stream=True,
)
# Each chunk exposes the validated output produced so far.
for chunk in stream_chunk_generator:
    print(f"{chunk.validated_output}")
Other LLMs
Over 100 LLMs are supported through our LiteLLM integration, including:
- Anthropic
- AWS Bedrock
- Anyscale
- Huggingface
- Mistral
- Predibase
- Fireworks
Find your LLM in LiteLLM’s documentation. Then, follow those same steps and set the same environment variables they guide you to use, but invoke a Guard object instead of the litellm object.
Guardrails will pass your arguments through to litellm, run its validation process on the response, and return a validated outcome.
Custom LLM wrappers
If you’re using an LLM that isn’t natively supported by Guardrails and you don’t want to use LiteLLM, you can build a custom LLM API wrapper.
Create a function that accepts keyword arguments (including a messages list) for the LLM call. The function should return the output of the LLM API as a string.
Install ProfanityFree from hub:
guardrails hub install hub://guardrails/profanity_free
from guardrails import Guard
# ProfanityFree must be installed first: guardrails hub install hub://guardrails/profanity_free
from guardrails.hub import ProfanityFree
# Create a Guard class
guard = Guard().use(ProfanityFree())
# Function that accepts keyword args and returns the LLM output as a string.
# NOTE: the original signature `def my_llm_api(*, **kwargs)` is a SyntaxError
# in Python (a bare `*` requires a named parameter after it); `**kwargs`
# alone already makes every argument keyword-only.
def my_llm_api(
    **kwargs
) -> str:
    """Custom LLM API wrapper.

    At least one of messages should be provided.

    Args:
        **kwargs: Any additional arguments to be passed to the LLM API

    Returns:
        str: The output of the LLM API
    """
    messages = kwargs.pop("messages", [])
    # Placeholder hooks: replace these with your own preprocessing and LLM call.
    updated_messages = some_message_processing(messages)
    # Call your LLM API here
    # What you pass to the llm will depend on what arguments it accepts.
    llm_output = some_llm(updated_messages, **kwargs)
    return llm_output
# Wrap your LLM API call
# NOTE(review): `**kwargs` below is a placeholder for whatever extra
# arguments you want forwarded to my_llm_api; replace it with concrete
# keyword arguments (or an actual dict in scope) in real code.
validated_response = guard(
my_llm_api,
messages=[{"role":"user","content":"Can you generate a list of 10 things that are not food?"}],
**kwargs,
)