2) LLMs_novel_chain_generation

Jacob Kim · February 2, 2024
Naver Project Week5

Mount Google Drive

from google.colab import drive
drive.mount('/content/drive')

Install packages

!pip install openai langchain langchain-google-genai
from pprint import pprint
from typing import Dict, List

from langchain.chains import LLMChain, SequentialChain
# from langchain.chat_models import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts.chat import ChatPromptTemplate
from pydantic import BaseModel

Google API key

import getpass
import os

# os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["GOOGLE_API_KEY"] = getpass.getpass()

Prepare the prompt chains

  • Prepare the prompt chains for the content the service will generate.
  • Each prompt chain is prepared in advance and used as a template (a hypothetical example of one such template is sketched after the code below).
P_PATH = "/content/drive/MyDrive/dataset/Novel_generation/multi_prompt"
IDEA_P = os.path.join(P_PATH, "extract_idea.txt")
OUTLINE_P = os.path.join(P_PATH, "write_outline.txt")
PLOT_P = os.path.join(P_PATH, "write_plot.txt")
CHAPTER_P = os.path.join(P_PATH, "write_chapter.txt")
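The real template files are read from the Drive folder above. As a rough illustration only, extract_idea.txt might look something like the sketch below (hypothetical content, not the actual file); the placeholder names must match the input variables the chains receive:

# Hypothetical sketch of extract_idea.txt (the real file is loaded from Drive above).
# The {genre}, {characters}, and {text} placeholders must match the chain inputs.
example_idea_template = """
You are a novelist. Based on the information below, propose a one-paragraph core idea for a novel.

Genre: {genre}
Characters: {characters}
Premise: {text}

Novel idea:
"""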

Implement the prompt chain

SequentialChain lets you run several chains back to back, feeding each chain's output into the next.
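As a minimal sketch of the wiring (the chain and variable names here are illustrative), each chain's output_key becomes an input placeholder of the next chain:

# Illustrative two-step sketch: the "idea" produced by chain_a is consumed by
# chain_b, because SequentialChain passes outputs along by their output_key names.
llm = ChatGoogleGenerativeAI(model="gemini-pro")
chain_a = LLMChain(
    llm=llm,
    prompt=ChatPromptTemplate.from_template("Suggest one story idea about {topic}."),
    output_key="idea",
)
chain_b = LLMChain(
    llm=llm,
    prompt=ChatPromptTemplate.from_template("Write a short outline for this idea: {idea}"),
    output_key="outline",
)
toy_chain = SequentialChain(
    chains=[chain_a, chain_b],
    input_variables=["topic"],
    output_variables=["idea", "outline"],
)
# toy_chain({"topic": "a cold winter"}) returns a dict containing "idea" and "outline".

The implementation below applies the same pattern to the idea, outline, plot, and chapter prompts.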

class UserRequest(BaseModel):
    genre: str
    characters: List[Dict[str, str]]
    text: str


def read_prompt_template(file_path: str) -> str:
    with open(file_path, "r") as f:
        prompt_template = f.read()

    return prompt_template


def create_chain(llm, template_path, output_key):
    return LLMChain(
        llm=llm,
        prompt=ChatPromptTemplate.from_template(
            template=read_prompt_template(template_path),
        ),
        output_key=output_key,
        verbose=True,
    )


def generate_novel(req: UserRequest) -> Dict[str, str]:
    writer_llm = ChatGoogleGenerativeAI(model="gemini-pro")
    # writer_llm = ChatOpenAI(temperature=0.3, max_tokens=500, model="gpt-3.5-turbo")

    # Chain that extracts the core idea
    novel_idea_chain = create_chain(writer_llm, IDEA_P, "novel_idea")

    # Chain that writes the outline
    novel_outline_chain = create_chain(
        writer_llm, OUTLINE_P, "novel_outline"
    )

    # Chain that writes the plot
    novel_plot_chain = create_chain(writer_llm, PLOT_P, "novel_plot")

    # Chain that writes each chapter
    novel_chapter_chain = create_chain(writer_llm, CHAPTER_P, "output")

    preprocess_chain = SequentialChain(
        chains=[
            novel_idea_chain,
            novel_outline_chain,
            novel_plot_chain,
        ],
        input_variables=["genre", "characters", "text"],
        output_variables=["novel_idea", "novel_outline", "novel_plot"],
        verbose=True,
    )

    # Run the idea → outline → plot chains on the user request
    context = req.dict()
    context = preprocess_chain(context)

    # Generate chapters 1-4, carrying the accumulated context into each call
    context["novel_chapter"] = []
    for chapter_number in range(1, 5):
        context["chapter_number"] = chapter_number
        context = novel_chapter_chain(context)
        context["novel_chapter"].append(context["output"])

    contents = "\n\n".join(context["novel_chapter"])
    return {"results": contents}

Write the user prompt

  • The user writes this prompt content directly.
user_data = {
    "genre": "판타지",  # fantasy
    "characters": [
        {
            "name": "김철수",
            "role": "주인공"  # protagonist
        },
        {
            "name": "이영희",
            "role": "조연"  # supporting character
        }
    ],
    "text": "날씨가 추워지고 있습니다."  # "The weather is getting colder."
}
  • Pass the user prompt into the UserRequest model.
request_instance = UserRequest(**user_data)

Text Generation

generate_novel(request_instance)
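generate_novel returns a dict with a single "results" key holding the joined chapters. To keep the output around instead of only displaying it in the notebook, one option (a sketch; the output path below is illustrative) is to capture the return value and write it to Drive:

# Capture the result, preview it, and save it (the path is illustrative).
result = generate_novel(request_instance)
pprint(result["results"][:500])  # preview the first 500 characters

with open("/content/drive/MyDrive/dataset/Novel_generation/novel.txt", "w") as f:
    f.write(result["results"])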