이 글에서는 번역, 요약, 채팅 등의 기능을 웹 상에서 간단히 만들어 보고자 한다.
langchain
langchain_openai
langserve
fastapi
uvicorn
sse_starlette
pydantic==1.10.13
from langchain_openai import ChatOpenAI

# Chat model backed by a local LM Studio server (OpenAI-compatible API).
# The api_key value is a placeholder — LM Studio does not validate it.
llm = ChatOpenAI(
    model="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF",
    base_url="http://localhost:1234/v1",
    api_key="lm-studio",
    temperature=0.1,  # low temperature: keep answers focused and repeatable
)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from .model import llm

# General-purpose assistant chain: fixed system prompt + user text -> plain string.
_SYSTEM_PROMPT = "You are a helpful, smart, kind, and efficient AI assistant. You always fulfill the user's requests to the best of your ability. You always answer succinctly. You must answer in Korean."

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", _SYSTEM_PROMPT),
        ("user", "{user_input}"),
    ]
)

chain = prompt | llm | StrOutputParser()
from fastapi import FastAPI
from langserve import add_routes
from app.vanilla import chain as vanilla_chain

# Minimal LangServe app: expose the vanilla chain under /vanilla.
app = FastAPI()
add_routes(app, vanilla_chain, path="/vanilla")

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="localhost", port=8000)
from fastapi import FastAPI
from langserve import add_routes
from app.vanilla import chain as vanilla_chain
from app.translate import chain as translate_chain
from app.summary import chain as summary_chain

# Serve each chain under its own URL prefix.
app = FastAPI()

_ROUTES = [
    ("/vanilla", vanilla_chain),
    ("/translate", translate_chain),
    ("/summary", summary_chain),
]
for url_path, runnable in _ROUTES:
    add_routes(app, runnable, path=url_path)

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="localhost", port=8000)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from .model import llm

# English -> Korean translation chain; {eng_input} holds the source sentences.
_TRANSLATE_TEMPLATE = "Don't say anything else and Translate following sentences into Korean:\n{eng_input}"

prompt = ChatPromptTemplate.from_template(_TRANSLATE_TEMPLATE)

chain = prompt | llm | StrOutputParser()
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from .model import llm

# Summarization chain: the user text is delimited with triple quotes so the
# model treats it as material to summarize, not as instructions.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You must summarize the User's sentences tremendously. You always answer into Korean. Don't say anything else."),
        ("user", "'''{input}'''"),
    ]
)

chain = prompt | llm | StrOutputParser()
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from .model import llm

# Chat chain: the full conversation history is injected via a placeholder.
# NOTE(review): 'messsages1' (triple "s") looks like a typo, but it is a
# runtime key that must stay identical to the InputChat field name on the
# server side — renaming it requires changing both places at once.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful, smart, kind, and efficient AI assistant. You always fulfill the user's requests to the best of your ability. You always answer succinctly. You must answer in Korean."),
        MessagesPlaceholder(variable_name="messsages1"),
    ]
)

chain = prompt | llm | StrOutputParser()
from typing import List, Union

from fastapi import FastAPI
from langserve import add_routes
from langserve.pydantic_v1 import BaseModel, Field
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage

from app.vanilla import chain as vanilla_chain
from app.translate import chain as translate_chain
from app.summary import chain as summary_chain
from app.chat import chain as chat_chain

# Application exposing all chains; the chat route is configured separately
# because it needs an explicit input schema.
app = FastAPI()

add_routes(app, vanilla_chain, path="/vanilla")
add_routes(app, translate_chain, path="/translate")
add_routes(app, summary_chain, path="/summary")
class InputChat(BaseModel):
    """Input schema for the chat route.

    The field name must be identical to the ``MessagesPlaceholder``
    variable name used in the chat prompt template ('messsages1'), or
    LangServe cannot bind the incoming messages to the template.
    """

    # Full conversation history, ordered oldest-first.
    messsages1: List[Union[HumanMessage, AIMessage, SystemMessage]] = Field(
        ...,
        description="The chat messages representing the current conversation.",
    )
# The chat route gets explicit input typing plus the chat playground and
# feedback/trace endpoints.
add_routes(
    app,
    chat_chain.with_types(input_type=InputChat),
    path="/chat",
    playground_type="chat",
    enable_feedback_endpoint=True,
    enable_public_trace_link_endpoint=True,
)

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="localhost", port=8000)
from app.model import llm

# Expose the raw model (no prompt/parser) so remote clients can compose
# their own chains around it.
add_routes(app, llm, path="/llm")
from langserve import RemoteRunnable
# Fix: ChatPromptTemplate and StrOutputParser were used below without being
# imported, so this snippet raised NameError when run standalone.
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Client side: the served /llm endpoint behaves like a local Runnable, so it
# can be piped into a client-defined prompt and output parser.
llm2 = RemoteRunnable("http://localhost:8000/llm")

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful, smart, kind, and efficient AI assistant."),
    ("user", "{input} 한국어로 대답해줘."),
])

chain = prompt | llm2 | StrOutputParser()

# Invoke over HTTP; `msg` is the model's plain-string answer.
msg = chain.invoke({'input' : '안녕하세요!'})