RAG - 대화형 Chatbot & Memory
Retriever가 뽑아온 컨텍스트를 기반으로 LLM이 답변하도록 설계
기본 흐름: 사용자 질문 → Retriever가 관련 문서 검색 → 검색된 컨텍스트를 프롬프트에 주입 → LLM이 답변 생성
추가: Memory를 활용해 대화 맥락 유지
# Buffer memory: stores every past turn verbatim under the "chat_history" key;
# return_messages=True yields Message objects (needed by chat-style prompts)
# instead of one flattened string.
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# Summary memory: instead of keeping the raw transcript, it asks the LLM to
# maintain a running summary of prior turns — keeps the prompt short on long
# conversations.
#
# FIX: "gpt-3.5-turbo" is a chat-completions model, so it must be driven
# through ChatOpenAI. The legacy OpenAI class targets the text-completion
# endpoint and errors out when given a chat-only model name.
from langchain.memory import ConversationSummaryMemory
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
memory = ConversationSummaryMemory(llm=llm)
# Conversational RAG chain: condenses the follow-up question using the chat
# history, runs the retriever, and answers from the retrieved context.
# Assumes `llm`, `retriever`, and `memory` are defined earlier in the notebook.
from langchain.chains import ConversationalRetrievalChain

qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
)

# FIX: calling the chain directly — qa({...}) — is the deprecated __call__ API,
# and the original discarded the result. Use .invoke() and keep the returned
# dict (the answer lives under result["answer"]).
result = qa.invoke({"question": "오늘 배운 내용을 요약해줘"})