from guardrails import Guard
from guardrails.hub import ProfanityFree
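
# The ProfanityFree validator is distributed via the Guardrails Hub; if it is
# not already installed, the documented install command is:
#   guardrails hub install hub://guardrails/profanity_free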

# Create a Guard instance and attach the ProfanityFree validator
guard = Guard().use(ProfanityFree())

# Function that accepts keyword arguments (including `messages`) and
# returns the LLM output as a string
def my_llm_api(**kwargs) -> str:
"""Custom LLM API wrapper.
At least one of messages should be provided.
Args:
**kwargs: Any additional arguments to be passed to the LLM API
Returns:
str: The output of the LLM API
"""
    messages = kwargs.pop("messages", [])
    # `some_message_processing` is a placeholder for your own preprocessing
    updated_messages = some_message_processing(messages)

    # Call your LLM API here; `some_llm` is a placeholder. What you pass to
    # the LLM will depend on which arguments it accepts.
    llm_output = some_llm(updated_messages, **kwargs)
    return llm_output
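
# For illustration only: a minimal concrete version of the wrapper above,
# assuming the OpenAI Python SDK (`pip install openai`). The client setup and
# default model name below are assumptions, not part of the Guardrails API.
from openai import OpenAI

def my_openai_llm_api(**kwargs) -> str:
    """Example wrapper around OpenAI chat completions."""
    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    messages = kwargs.pop("messages", [])
    response = client.chat.completions.create(
        model=kwargs.pop("model", "gpt-4o-mini"),  # hypothetical default
        messages=messages,
        **kwargs,
    )
    return response.choices[0].message.content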

# Wrap your LLM API call with the guard; any extra keyword arguments
# (e.g. temperature=0.0) are forwarded to my_llm_api via **kwargs
validated_response = guard(
    my_llm_api,
    messages=[{"role": "user", "content": "Can you generate a list of 10 things that are not food?"}],
)
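
# guard(...) returns a ValidationOutcome object; a quick way to inspect it:
print(validated_response.validation_passed)  # True if the output passed ProfanityFree
print(validated_response.validated_output)   # the validated LLM output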