# app.py
from flask import Flask, request, jsonify, send_from_directory
from flask_cors import CORS
import os
from dotenv import load_dotenv
import openai
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
import chromadb
import json

app = Flask(__name__, static_folder="user-query-form/build", static_url_path="")
CORS(app, supports_credentials=True)

# Load environment variables
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

llm = OpenAI(model="gpt-3.5-turbo-0125")

# Use a remote Chroma server when configured; otherwise persist locally.
if os.getenv("CHROMA_DB_HOST"):
    host = os.getenv("CHROMA_DB_HOST")
    port = int(os.getenv("CHROMA_DB_PORT", "8000"))
    db = chromadb.HttpClient(host=host, port=port)
else:
    db = chromadb.PersistentClient(path="./chroma_db")

embed_model = OpenAIEmbedding(model="text-embedding-3-small", embed_batch_size=50)

# Build a sentence-window index on top of the existing Chroma collection.
sentence_window_collection = db.get_or_create_collection("sentence_window")
sentence_window_vector_store = ChromaVectorStore(
    chroma_collection=sentence_window_collection
)
sentence_index = VectorStoreIndex.from_vector_store(
    sentence_window_vector_store, embed_model=embed_model
)

# Store session-specific ChatMemoryBuffers in server memory
chat_memories = {}


def get_chat_memory(uniqueID):
    if uniqueID not in chat_memories:
        # Initialize a new ChatMemoryBuffer for this session
        print("new session started with id: " + uniqueID)
        chat_memories[uniqueID] = ChatMemoryBuffer.from_defaults(token_limit=3900)
    return chat_memories[uniqueID]
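
# Example: get_chat_memory("session-1") returns the same ChatMemoryBuffer on
# every call with that ID ("session-1" is just an illustrative session value).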


def getMessagesFromChatBuffer(chat_memory):
    # Flatten the buffer into the {"role", "content"} dicts the OpenAI API expects.
    chat_history = chat_memory.get_all()
    messages = [
        {"role": message.role, "content": message.content} for message in chat_history
    ]
    return messages


def getSuggestedQuestions(messages):
    system_prompt = {
        "role": "system",
        "content": (
            "The following is a conversation about the HBS faculty with a"
            " chatbot that has access to HBS faculty information."
        ),
    }
    prompt = {
        "role": "user",
        "content": (
            "Give me three follow-up questions based on this conversation that I"
            " can ask you. Return them as JSON with the three questions under the"
            " 'data' key, as follows:"
            ' {"data": ["question 1", "question 2", "question 3"]}'
        ),
    }
    all_messages = [system_prompt, *messages, prompt]
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=all_messages,
        response_format={"type": "json_object"},
    )
    return response.choices[0].message
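
# Note: the model is instructed to return {"data": ["q1", "q2", "q3"]};
# the /follow-up-questions route below depends on that "data" key.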
@app.route("/chat", methods=["POST"])
def chat():
uniqueID = request.headers.get("uniqueID")
chat_memory = get_chat_memory(
uniqueID
) # Retrieve or initialize the session-specific ChatMemoryBuffer
question = request.json.get("question")
if not question:
return jsonify({"error": "No question provided"}), 400
response = sentence_index.as_chat_engine(
similarity_top_k=5,
memory=chat_memory, # Use the session-specific memory here
chat_mode="condense_plus_context",
node_postprocessors=[
MetadataReplacementPostProcessor(target_metadata_key="window")
],
context_prompt=(
"You are a chatbot, able to have normal interactions, as well as talk"
" about the HBS faculty. Focus your answers on including information from the relevant documents on HBS."
"Here are the relevant documents for the context:\n"
"{context_str}"
"\nInstruction: Use the previous chat history, or the context above, to interact and help the user. Ensure that the responses are relevant to HBS."
),
).chat(question)
return jsonify({"response": response.response})
@app.route("/follow-up-questions", methods=["POST"])
def follow_up_questions():
uniqueID = request.headers.get("uniqueID")
if uniqueID not in chat_memories:
return jsonify({"error": "Session not found"}), 404
chat_memory = chat_memories[uniqueID]
messages = getMessagesFromChatBuffer(chat_memory)
follow_ups = getSuggestedQuestions(messages)
parsed_content = json.loads(follow_ups.content)
return jsonify({"questions": parsed_content["data"]})
@app.route("/", defaults={"path": ""}) # Updated
@app.route("/<path:path>") # Updated
def serve(path):
if path != "" and os.path.exists(app.static_folder + "/" + path):
return send_from_directory(app.static_folder, path)
else:
return send_from_directory(app.static_folder, "index.html")
if __name__ == "__main__":
app.run(debug=True)
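
# Example usage once the server is running (assuming Flask's default
# http://localhost:5000; the "uniqueID" header value below is an arbitrary
# illustrative session identifier):
#
#   curl -X POST http://localhost:5000/chat \
#     -H "Content-Type: application/json" -H "uniqueID: session-1" \
#     -d '{"question": "Who teaches entrepreneurship at HBS?"}'
#
#   curl -X POST http://localhost:5000/follow-up-questions \
#     -H "uniqueID: session-1"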