From 5818c9913f27843477670728a36c116990427ff7 Mon Sep 17 00:00:00 2001
From: robinrolle
Date: Sat, 12 Apr 2025 13:07:17 +0200
Subject: [PATCH] add structured output llm call

Note: never commit real API keys. The Groq key previously pasted into
.env.example is compromised (it is in git history) and must be revoked;
GROQ_API_KEY is now an empty placeholder to be filled from the local
environment.

---
 .env.example                                        |  3 +-
 requirements.txt                                    |  4 ++-
 .../llm_call_structured_output.py                   | 35 +++++++++++++++++++
 3 files changed, 40 insertions(+), 2 deletions(-)
 create mode 100644 utils/decisions_makers/llm_call_structured_output.py

diff --git a/.env.example b/.env.example
index 293ecce..01ea49c 100644
--- a/.env.example
+++ b/.env.example
@@ -1,3 +1,4 @@
 API_URI=
 API_KEY=
-API_TEAM=
\ No newline at end of file
+API_TEAM=
+GROQ_API_KEY=
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index bc715e1..52d6bbe 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,4 +14,6 @@ pypdfium2==4.30.1
 pytesseract==0.3.13
 requests==2.32.3
 urllib3==2.4.0
-pydantic==2.11.3
\ No newline at end of file
+pydantic==2.11.3
+langchain==0.3.23
+langchain-groq==0.3.2
diff --git a/utils/decisions_makers/llm_call_structured_output.py b/utils/decisions_makers/llm_call_structured_output.py
new file mode 100644
index 0000000..24eb3be
--- /dev/null
+++ b/utils/decisions_makers/llm_call_structured_output.py
@@ -0,0 +1,35 @@
+from langchain_core.runnables import Runnable
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import PydanticOutputParser
+from langchain_groq import ChatGroq
+from pydantic import BaseModel, Field
+
+
+# Step 1: Define the structured output
+class CountryAnswer(BaseModel):
+    answer: str = Field(..., description="La réponse à la question")
+    country: str = Field(..., description="Le pays concerné")
+
+# Step 2: Create the output parser
+parser = PydanticOutputParser(pydantic_object=CountryAnswer)
+
+# Step 3: Create the prompt
+prompt = ChatPromptTemplate.from_template(
+    "Tu es un assistant utile. Réponds à la question : {question}\n"
+    "Réponds uniquement en JSON avec ce format :\n{format_instructions}"
+)
+
+# Step 4: LLM configuration (reads GROQ_API_KEY from the environment)
+llm = ChatGroq(model="llama3-70b-8192", temperature=0.7)
+
+# Step 5: Combine everything
+chain: Runnable = prompt | llm | parser
+
+# Step 6: Run the chain
+response = chain.invoke({
+    "question": "Quelle est la capitale de la Suisse ?",
+    "format_instructions": parser.get_format_instructions()
+})
+
+# Result
+print(response)
\ No newline at end of file