Skip to main content

Install

pip install langchain-openai

Setup

from langchain_openai import ChatOpenAI

# Point the standard OpenAI-compatible client at the Kyma gateway.
# Keyword order is irrelevant to ChatOpenAI; only the values matter.
llm = ChatOpenAI(
    model="llama-3.3-70b",
    api_key="kyma-your-api-key",
    base_url="https://kymaapi.com/v1",
)

Basic usage

# Single blocking call: send one prompt, print the reply text.
answer = llm.invoke("Explain RAG in simple terms")
print(answer.content)

With prompt template

from langchain_core.prompts import ChatPromptTemplate

# Message templates with {placeholders} that are filled in at invoke time.
messages = [
    ("system", "You are a {role}."),
    ("user", "{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)

# LCEL pipe: render the prompt, then hand the messages to the model.
chain = prompt | llm

result = chain.invoke({"role": "Python expert", "question": "How do decorators work?"})
print(result.content)

Streaming

# Emit tokens as they arrive instead of waiting for the complete reply.
for piece in llm.stream("Write a short story"):
    print(piece.content, end="", flush=True)

RAG example

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# Same gateway configuration as the setup section.
llm = ChatOpenAI(
    model="llama-3.3-70b",
    api_key="kyma-your-api-key",
    base_url="https://kymaapi.com/v1",
)

# Use with your retriever
# The system message injects retrieved text; the user message asks about it.
rag_prompt = ChatPromptTemplate.from_messages([
    ("system", "Answer based on the following context:\n\n{context}"),
    ("user", "{question}"),
])

rag_chain = rag_prompt | llm

# In a real pipeline {context} would come from a retriever; here it is inlined.
reply = rag_chain.invoke({
    "context": "Kyma API is a free LLM gateway...",
    "question": "What is Kyma?",
})