To install LangChain, run:

yarn add langchain @langchain/core

If you want to trace your runs with LangSmith, set the following environment variables:

export LANGSMITH_TRACING="true"
export LANGSMITH_API_KEY="..."
# Optional: Reduce tracing latency
export LANGCHAIN_CALLBACKS_BACKGROUND=true
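If exporting shell variables is inconvenient (for example in a quick local script), you can set the same values on process.env before any LangChain code runs. This is just a convenience sketch; the values are placeholders:

// Hypothetical inline setup with placeholder values
process.env.LANGSMITH_TRACING = "true";
process.env.LANGSMITH_API_KEY = "...";
// Optional: reduce tracing latency
process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "true";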
This guide uses Groq chat models. Install the provider package and set your API key:

yarn add @langchain/groq

GROQ_API_KEY=your-api-key

Then instantiate the model:

import { ChatGroq } from "@langchain/groq";

const model = new ChatGroq({
  model: "llama-3.3-70b-versatile",
  temperature: 0,
});
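As a quick check that the model is configured, chat models also accept a plain string as input; a minimal sketch (the prompt text is just an example):

// Sanity check: pass a plain string and print the reply's content
const reply = await model.invoke("Hello, world!");
console.log(reply.content);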
Call the model's .invoke method with a list of messages to interact with it:

import { HumanMessage, SystemMessage } from "@langchain/core/messages";

const messages = [
  new SystemMessage("Translate the following from English into Italian"),
  new HumanMessage("hi!"),
];
await model.invoke(messages);
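The call returns an AIMessage. To look at just the model's reply, capture the result and log its content; a small sketch reusing the messages array above (the output shown is illustrative):

// Capture the AIMessage and print the translated text
const aiMessage = await model.invoke(messages);
console.log(aiMessage.content); // e.g. "Ciao!"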
Chat models also support streaming, so you can consume the response chunk by chunk as it is generated:

const stream = await model.stream(messages);

const chunks = [];
for await (const chunk of stream) {
  chunks.push(chunk);
  console.log(`${chunk.content}|`);
}
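Each chunk carries a piece of the response in its content field. If you want the complete text after streaming, one simple approach, assuming the chunk contents are plain strings, is to join them:

// Reassemble the streamed chunks into the full response text
const fullText = chunks.map((chunk) => chunk.content).join("");
console.log(fullText);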
Prompt templates help turn raw user input into a list of messages ready to pass to the model. This template takes two variables: language (the language to translate into) and text (the text to translate).

import { ChatPromptTemplate } from "@langchain/core/prompts";

const systemTemplate = "Translate the following from English into {language}";

const promptTemplate = ChatPromptTemplate.fromMessages([
  ["system", systemTemplate],
  ["user", "{text}"],
]);
const promptValue = await promptTemplate.invoke({
  language: "italian",
  text: "hi!",
});

// Inspect the formatted prompt and the chat messages it produces
console.log(promptValue);
console.log(promptValue.toChatMessages());
Finally, invoke the chat model on the formatted prompt value:

const response = await model.invoke(promptValue);
console.log(`${response.content}`);
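Since both the prompt template and the chat model are Runnables, you can also compose them with .pipe and invoke the resulting chain with the raw input variables; this is an equivalent sketch of the two-step flow above:

// Compose the prompt template and model into a single chain
const chain = promptTemplate.pipe(model);
const chained = await chain.invoke({ language: "italian", text: "hi!" });
console.log(`${chained.content}`);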