Last time, I ran an LLM server with LM Studio and used it to power a chatbot in Obsidian.
In this post, I'll connect LangChain to that LLM server from Python code and run it. First, install the required packages:
pip install langchain langchain_openai
from langchain_openai import ChatOpenAI
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

llm = ChatOpenAI(
    base_url="http://localhost:1234/v1",  # LM Studio's OpenAI-compatible local server
    api_key="lm-studio",  # placeholder; the local server doesn't validate the key
    model="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF",
    temperature=0.1,
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],  # print tokens to stdout as they stream
)
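Before wiring up a chain, it's worth a quick sanity check that the connection works. This is a minimal sketch; it assumes the LM Studio server is already running on port 1234 with the model above loaded.

# Send a single message directly to the model. Because of the streaming
# callback above, the tokens are also printed as they arrive.
result = llm.invoke("Hi, are you there?")  # returns an AIMessage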
Next, build a simple chain with a prompt template and a string output parser:

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

prompt = ChatPromptTemplate.from_template(
    "{input} 한국어로 답변해줘."
)
chain = prompt | llm | StrOutputParser()
response = chain.invoke("안녕!")
# response = chain.invoke({'input': "안녕!"})  # equivalent explicit form

Because the template has a single input variable, a bare string is accepted in place of the dict. And since the llm was created with streaming=True and the stdout callback, the tokens are printed as they are generated:

안녕하세요! 😊한국어로 답변하겠습니다. 무엇을 도와드릴까요? 🤔
You can also stream explicitly with chain.stream(), which yields the parsed string chunks one by one:

for t in chain.stream("안녕!"):
    print(t, end='')
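In an async context, the same chain exposes astream(). A minimal sketch of the asynchronous variant (the coroutine name main is just for illustration):

import asyncio

async def main():
    # astream yields the same string chunks, without blocking the event loop
    async for t in chain.astream("안녕!"):
        print(t, end='')

asyncio.run(main())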
With StrOutputParser at the end of the chain, invoke() returns a plain string:

chain = prompt | llm | StrOutputParser()
response = chain.invoke("안녕!")
response
'안녕하세요! 😊한국어로 답변하겠습니다. 무엇을 도와드릴까요? 🤔'
Drop the parser and the chain returns the raw AIMessage instead, with the generation metadata attached:

chain = prompt | llm  # | StrOutputParser()
response = chain.invoke("안녕!")
response
AIMessage(content='안녕하세요! 😊한국어로 답변하겠습니다! 무엇을 도와드릴까요? 🤔', response_metadata={'token_usage': {'completion_tokens': 20, 'prompt_tokens': 56, 'total_tokens': 76}, 'model_name': 'lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-da48ac13-8be5-47d2-9d63-b86b462dbc09-0')
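When you need the generated text or the token counts from the AIMessage, read them off the object directly, as in this short sketch:

# The generated text lives in .content, token counts in .response_metadata.
print(response.content)
usage = response.response_metadata["token_usage"]
print(f"prompt: {usage['prompt_tokens']}, completion: {usage['completion_tokens']}")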
Finally, instead of appending the instruction to the user input, you can split the prompt into a system message and a user message with from_messages():

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful, smart, kind, and efficient AI assistant. You always fulfill the user's requests to the best of your ability. You must always answer in Korean."),
    ("user", "{input}")
])
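This prompt plugs into the chain exactly as before. A minimal sketch (the question string is just a hypothetical example input):

# Rebuild the chain with the system-prompt template and ask a question.
chain = prompt | llm | StrOutputParser()
response = chain.invoke({"input": "파이썬이 뭐야?"})  # hypothetical example input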