Skip to content

Commit

Permalink
Merge pull request #144 from SWM14-Architect/refactor/answer_service_…
Browse files Browse the repository at this point in the history
…and_test

Refactor/answer service and test
  • Loading branch information
saebyeok0306 authored Nov 6, 2023
2 parents d0f160c + be6ab73 commit 2612b86
Show file tree
Hide file tree
Showing 18 changed files with 265 additions and 206 deletions.
13 changes: 10 additions & 3 deletions moview/config/container/container_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@
from moview.repository.question_answer.question_answer_repository import QuestionAnswerRepository
from moview.service.interview_service import InterviewService
from moview.service.input_data_service import InputDataService
from moview.service.answer_service import AnswerService
from moview.service.answer.answer_service import AnswerService
from moview.service.answer.question_choosing_strategy import RandomQuestionChoosingStrategy
from moview.service.evaluation_service import EvaluationService
from moview.service.feedback_service import FeedbackService
from moview.service.light_mode_service import LightModeService
Expand Down Expand Up @@ -39,18 +40,24 @@ def __init__(self):
# Service
self.user_service = UserService(user_repository=self.user_repository)

self.interview_service = InterviewService(interview_repository=self.interview_repository)
self.interview_service = InterviewService(interview_repository=self.interview_repository,
question_answer_repository=self.question_answer_repository)
self.input_data_service = InputDataService(
input_data_repository=self.input_data_repository,
question_answer_repository=self.question_answer_repository,
initial_question_giver=self.initial_question_giver,
initial_input_analyzer=self.initial_input_analyzer
)

self.choosing_strategy = RandomQuestionChoosingStrategy()

self.answer_service = AnswerService(
interview_repository=self.interview_repository,
question_answer_repository=self.question_answer_repository,
giver=self.followup_question_giver
choosing_strategy=self.choosing_strategy,
followup_question_giver=self.followup_question_giver
)

self.evaluation_service = EvaluationService(
interview_repository=self.interview_repository,
question_answer_repository=self.question_answer_repository,
Expand Down
17 changes: 14 additions & 3 deletions moview/controller/answer_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,23 @@ def post(self):
question_content = request_body['question_content']
answer_content = request_body['answer_content']

interview_service = ContainerConfig().interview_service
answer_service = ContainerConfig().answer_service

try:
chosen_question, saved_id = answer_service.answer(user_id=user_id, interview_id=interview_id,
question_id=question_id, question_content=question_content,
answer_content=answer_content)
interview_dict = interview_service.find_interview(user_id=user_id, interview_id=interview_id)

interview_service.add_latest_question_into_interview(interview_id=interview_id,
interview_dict=interview_dict,
question_id=question_id,
question_content=question_content)

chosen_question, saved_id = answer_service.maybe_give_followup_question_about_latest_answer(
interview_id=interview_id,
question_id=question_id,
question_content=question_content,
answer_content=answer_content)

except RetryExecutionError as e:
error_logger(msg="RETRY EXECUTION ERROR")
raise e
Expand Down
9 changes: 0 additions & 9 deletions moview/environment/llm_factory.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
from langchain.chat_models import ChatOpenAI
from moview.environment.environment_loader import EnvironmentLoader

OPENAI_API_KEY_PARAM = "openai-api-key"
Expand All @@ -24,14 +23,6 @@ class LLMModelFactory:
gpt-4-0613 10,000 200
"""

@staticmethod
def create_chat_open_ai(model_name: str = "gpt-3.5-turbo",
temperature: float = 0.5,
request_timeout: int = 60) -> ChatOpenAI:
return ChatOpenAI(openai_api_key=EnvironmentLoader.getenv(OPENAI_API_KEY_PARAM),
temperature=temperature, model_name=model_name,
verbose=False, streaming=False, request_timeout=request_timeout)

@staticmethod
def load_api_key_for_open_ai() -> str:
    """Return the OpenAI API key read via EnvironmentLoader.

    Looks up the key under OPENAI_API_KEY_PARAM ("openai-api-key").
    """
    return EnvironmentLoader.getenv(OPENAI_API_KEY_PARAM)
3 changes: 2 additions & 1 deletion moview/modules/light/light_question_giver.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,10 @@
from moview.utils.prompt_parser import PromptParser
from moview.utils.singleton_meta_class import SingletonMeta
from moview.decorator.retry_decorator import retry
from moview.utils.mixin.directory_mixin import DirectoryMixin


class LightQuestionGiver(metaclass=SingletonMeta):
class LightQuestionGiver(DirectoryMixin, metaclass=SingletonMeta):

def __init__(self, prompt_loader: PromptLoader):
self.prompt = prompt_loader.load_prompt_json(LightQuestionGiver.__name__)
Expand Down
3 changes: 2 additions & 1 deletion moview/modules/question_generator/followup_question_giver.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,10 @@
from moview.config.loggers.mongo_logger import prompt_result_logger
from moview.decorator.retry_decorator import retry
from moview.utils.singleton_meta_class import SingletonMeta
from moview.utils.mixin.directory_mixin import DirectoryMixin


class FollowUpQuestionGiver(metaclass=SingletonMeta):
class FollowUpQuestionGiver(DirectoryMixin, metaclass=SingletonMeta):

def __init__(self, prompt_loader: PromptLoader):
self.prompt = prompt_loader.load_prompt_json(FollowUpQuestionGiver.__name__)
Expand Down
Empty file.
95 changes: 95 additions & 0 deletions moview/service/answer/answer_service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
from typing import Optional, Tuple
from moview.config.loggers.mongo_logger import execution_trace_logger
from moview.service.answer.question_choosing_strategy import QuestionChoosingStrategy
from moview.service.answer.followup_question_determiner import FollowupQuestionDeterminer
from moview.repository.question_answer.question_answer_repository import QuestionAnswerRepository
from moview.repository.interview_repository import InterviewRepository
from moview.domain.entity.question_answer.question_document import Question
from moview.domain.entity.question_answer.answer_document import Answer
from moview.modules.question_generator import FollowUpQuestionGiver
from moview.utils.singleton_meta_class import SingletonMeta
from moview.utils.prompt_parser import PromptParser


class AnswerService(metaclass=SingletonMeta):
    """Persists interview answers and, probabilistically, produces follow-up questions.

    Collaborators are injected so that the choosing strategy and the
    follow-up question giver can be swapped without touching this class.
    """

    def __init__(self, interview_repository: InterviewRepository, question_answer_repository: QuestionAnswerRepository,
                 choosing_strategy: QuestionChoosingStrategy,
                 followup_question_giver: FollowUpQuestionGiver):
        self.interview_repository = interview_repository
        self.question_answer_repository = question_answer_repository
        self.choosing_strategy = choosing_strategy
        self.followup_question_giver = followup_question_giver

    # TODO: this method needs to run inside a transaction (answer save +
    #  follow-up question save should be atomic).
    def maybe_give_followup_question_about_latest_answer(self, interview_id: str, question_id: str,
                                                         question_content: str, answer_content: str) -> \
            Tuple[Optional[str], Optional[str]]:

        """Save the latest answer and possibly return a follow-up question.

        Args:
            interview_id: Id of the interview session.
            question_id: Id of the question that was just answered.
            question_content: Content of the question that was just answered.
            answer_content: Content of the answer that was just given.
        Returns:
            (follow-up question content, str(follow-up question id)) when a
            follow-up question is produced, otherwise (None, None).
        """

        self.__save_latest_answer(answer_content=answer_content, question_id=question_id)

        # Guard: the determiner decides (randomly) whether a follow-up is warranted.
        if not FollowupQuestionDeterminer.need_to_give_followup_question():
            execution_trace_logger(msg="NO_FOLLOWUP_QUESTION")
            return None, None

        execution_trace_logger(msg="NEED_TO_GIVE_FOLLOWUP_QUESTION")

        raw_followup = self.followup_question_giver.give_followup_question(question=question_content,
                                                                           answer=answer_content)

        candidates = PromptParser.parse_question(raw_followup)

        # Guard: the LLM output may parse into zero usable questions.
        if not candidates:
            execution_trace_logger(msg="NO_FOLLOWUP_QUESTION")
            return None, None

        chosen_question = self.choosing_strategy.choose_question(candidates)

        saved_followup_question_id = self.__save_followup_question(interview_id=interview_id,
                                                                   question_id=question_id,
                                                                   followup_question_content=chosen_question)
        return chosen_question, str(saved_followup_question_id)

    def __save_latest_answer(self, answer_content: str, question_id: str):
        """Persist the answer document, linked back to its question by DBRef-style keys."""
        execution_trace_logger(msg="CREATE_AND_SAVE_ANSWER")

        answer = Answer(content=answer_content,
                        question_id={
                            "#ref": self.question_answer_repository.collection.name,
                            "#id": question_id,
                            "#db": self.question_answer_repository.db.name
                        })

        self.question_answer_repository.save_answer(answer)

    def __save_followup_question(self, interview_id: str, question_id: str, followup_question_content: str):
        """Persist the follow-up question and return its inserted id.

        The prev_question_id reference pointing at question_id is what marks
        this document as a follow-up (tail) question.
        """
        execution_trace_logger(msg="CREATE_AND_SAVE_FOLLOWUP_QUESTION")

        followup_question = Question(content=followup_question_content, feedback_score=0,
                                     interview_id={
                                         "#ref": self.interview_repository.collection.name,
                                         "#id": interview_id,
                                         "#db": self.interview_repository.db.name
                                     },
                                     prev_question_id={
                                         "#ref": self.question_answer_repository.collection.name,
                                         "#id": question_id,
                                         "#db": self.question_answer_repository.db.name
                                     })

        return self.question_answer_repository.save_question(followup_question).inserted_id
13 changes: 13 additions & 0 deletions moview/service/answer/followup_question_determiner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import random
from moview.utils.mixin.directory_mixin import DirectoryMixin


class FollowupQuestionDeterminer(DirectoryMixin):
    """Randomly decides whether a follow-up (tail) question should be asked."""

    @staticmethod
    def need_to_give_followup_question(probability: float = 0.5) -> bool:
        """Return True with the given probability.

        Args:
            probability: Chance in [0.0, 1.0] of requesting a follow-up
                question. Defaults to 0.5, preserving the previous
                hard-coded behavior for existing callers.

        Returns:
            True if a follow-up question should be given, else False.
        """
        return random.random() < probability
20 changes: 20 additions & 0 deletions moview/service/answer/question_choosing_strategy.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import random
from typing import List


class QuestionChoosingStrategy:
    """Strategy interface for selecting one question out of several candidates."""

    def choose_question(self, parsed_questions: List[str]) -> str:
        """Select a single question from *parsed_questions*.

        Subclasses must override this method.

        Raises:
            NotImplementedError: always, on the base interface.
        """
        raise NotImplementedError("Choosing strategy must implement choose_question method.")


class RandomQuestionChoosingStrategy(QuestionChoosingStrategy):
    """Concrete strategy that selects a candidate uniformly at random."""

    def choose_question(self, parsed_questions: List[str]) -> str:
        """Return one element of *parsed_questions*, chosen uniformly at random."""
        return random.choice(parsed_questions)
144 changes: 0 additions & 144 deletions moview/service/answer_service.py

This file was deleted.

Loading

0 comments on commit 2612b86

Please sign in to comment.