import promptguard
from openai import OpenAI
import chromadb
# --- Module-level setup (runs on import) ---
# Initialize PromptGuard in enforcing mode with response scanning: per its
# init options below, it is configured to scan both outgoing prompts and
# incoming LLM responses for the calls made later in this module.
# NOTE(review): API key is hard-coded — should be loaded from an environment
# variable or secret store, not committed in source.
promptguard.init(
    api_key="pg_xxx",
    mode="enforce",
    scan_responses=True,
)
# Initialize clients.
# OpenAI() reads its credentials from the environment (OPENAI_API_KEY).
openai_client = OpenAI()
# In-memory Chroma client; "documents" collection is created if absent.
# NOTE(review): presumably the collection is populated elsewhere — an empty
# collection yields no retrieval context.
chroma_client = chromadb.Client()
collection = chroma_client.get_or_create_collection("documents")
def secure_rag_query(user_query: str, *, model: str = "gpt-4", n_results: int = 5) -> str:
    """Answer *user_query* via retrieval-augmented generation.

    The outgoing prompt and the LLM response are auto-scanned by the
    promptguard.init(...) call at module level (mode="enforce",
    scan_responses=True).

    Args:
        user_query: Natural-language question from the (untrusted) user.
        model: Chat-completion model name (keyword-only; default "gpt-4").
        n_results: Number of documents to retrieve (keyword-only; default 5).

    Returns:
        The assistant's answer text ("" if the model returned no content).
    """
    # Retrieve the most relevant documents for the query.
    results = collection.query(
        query_texts=[user_query],
        n_results=n_results,
    )
    # results["documents"] is a list of per-query lists; guard against an
    # empty collection — the original indexed [0] unconditionally and would
    # raise IndexError with no retrieval results.
    doc_lists = results["documents"]
    docs = doc_lists[0] if doc_lists else []
    context = "\n\n".join(docs)
    # NOTE(review): retrieved documents are interpolated directly into the
    # system prompt — an indirect prompt-injection vector. PromptGuard's
    # response scanning is the stated mitigation; consider scanning
    # `context` explicitly as well.
    response = openai_client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "system",
                "content": f"Answer based on this context:\n\n{context}",
            },
            {
                "role": "user",
                "content": user_query,
            },
        ],
    )
    # message.content can be None (e.g. tool calls / filtered output); the
    # declared return type is str, so normalize None to "".
    content = response.choices[0].message.content
    return content if content is not None else ""
# Example usage. Guarded so importing this module does not fire a live
# retrieval + LLM call as a side effect — the original ran unconditionally
# at import time.
if __name__ == "__main__":
    answer = secure_rag_query("What's the salary for employee John Smith?")
    print(answer)