forked from aws-samples/aws-genai-llm-chatbot
Commit: Merge branch 'aws-samples:main' into main
Showing 6 changed files with 136 additions and 2 deletions.
1 change: 1 addition & 0 deletions
lib/model-interfaces/langchain/functions/request-handler/adapters/bedrock/__init__.py
@@ -4,3 +4,4 @@
 from .cohere import *
 from .llama2_chat import *
 from .mistral import *
+from .llama3 import *
67 changes: 67 additions & 0 deletions
lib/model-interfaces/langchain/functions/request-handler/adapters/bedrock/llama3.py
@@ -0,0 +1,67 @@
import genai_core.clients

# from langchain.llms import Bedrock (pending https://github.com/langchain-ai/langchain/issues/13316)
from .base import Bedrock

from langchain.prompts.prompt import PromptTemplate


from ..shared.meta.llama3_instruct import (
    Llama3PromptTemplate,
    Llama3QAPromptTemplate,
    Llama3CondensedQAPromptTemplate,
)
from ..shared.meta.llama3_instruct import Llama3ConversationBufferMemory

from ..base import ModelAdapter
from genai_core.registry import registry


class BedrockMetaLLama3InstructAdapter(ModelAdapter):
    def __init__(self, model_id, *args, **kwargs):
        self.model_id = model_id

        super().__init__(*args, **kwargs)

    def get_memory(self, output_key=None, return_messages=False):
        return Llama3ConversationBufferMemory(
            memory_key="chat_history",
            chat_memory=self.chat_history,
            return_messages=return_messages,
            output_key=output_key,
        )

    def get_llm(self, model_kwargs={}):
        bedrock = genai_core.clients.get_bedrock_client()

        params = {}
        if "temperature" in model_kwargs:
            params["temperature"] = model_kwargs["temperature"]
        if "topP" in model_kwargs:
            params["top_p"] = model_kwargs["topP"]
        if "maxTokens" in model_kwargs:
            params["max_gen_len"] = model_kwargs["maxTokens"]

        return Bedrock(
            client=bedrock,
            model_id=self.model_id,
            model_kwargs=params,
            streaming=model_kwargs.get("streaming", False),
            callbacks=[self.callback_handler],
        )

    def get_prompt(self):
        return Llama3PromptTemplate

    def get_qa_prompt(self):
        return Llama3QAPromptTemplate

    def get_condense_question_prompt(self):
        return Llama3CondensedQAPromptTemplate


# Register the adapter
registry.register(
    r"^bedrock.meta.llama3-.*-instruct.*",
    BedrockMetaLLama3InstructAdapter,
)
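
Two details here are easy to miss: get_llm translates the chatbot's camelCase request parameters into the names the Bedrock Llama runtime expects (topP becomes top_p, maxTokens becomes max_gen_len), and the adapter itself is chosen at runtime by matching the registered regex against a model key. A minimal standalone sketch of both, assuming model keys of the form `bedrock.<model-id>` (that key shape is an assumption inferred from the pattern, not shown in this diff):

```python
import re

# The pattern registered above, matched against provider-qualified model keys.
pattern = re.compile(r"^bedrock.meta.llama3-.*-instruct.*")

# Hypothetical keys for illustration only.
for key in [
    "bedrock.meta.llama3-8b-instruct-v1:0",   # matches -> this adapter
    "bedrock.meta.llama3-70b-instruct-v1:0",  # matches -> this adapter
    "bedrock.meta.llama2-13b-chat-v1",        # no match -> llama2_chat adapter
]:
    print(key, bool(pattern.match(key)))

# get_llm's parameter translation, reproduced standalone:
model_kwargs = {"temperature": 0.5, "topP": 0.9, "maxTokens": 512}
params = {}
if "temperature" in model_kwargs:
    params["temperature"] = model_kwargs["temperature"]
if "topP" in model_kwargs:
    params["top_p"] = model_kwargs["topP"]
if "maxTokens" in model_kwargs:
    params["max_gen_len"] = model_kwargs["maxTokens"]
print(params)  # {'temperature': 0.5, 'top_p': 0.9, 'max_gen_len': 512}
```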
1 change: 1 addition & 0 deletions
lib/model-interfaces/langchain/functions/request-handler/adapters/shared/meta/__init__.py
@@ -1 +1,2 @@
 from .llama2_chat import *
+from .llama3_instruct import *
61 changes: 61 additions & 0 deletions
lib/model-interfaces/langchain/functions/request-handler/adapters/shared/meta/llama3_instruct.py
@@ -0,0 +1,61 @@
import json

from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

BEGIN_OF_TEXT = "<|begin_of_text|>"
SYSTEM_HEADER = "<|start_header_id|>system<|end_header_id|>"
USER_HEADER = "<|start_header_id|>user<|end_header_id|>"
ASSISTANT_HEADER = "<|start_header_id|>assistant<|end_header_id|>"
EOD = "<|eot_id|>"

Llama3Prompt = f"""{BEGIN_OF_TEXT}{SYSTEM_HEADER}
You are an helpful assistant that provides concise answers to user questions with as little sentences as possible and at maximum 3 sentences. You do not repeat yourself. You avoid bulleted list or emojis.{EOD}{{chat_history}}{USER_HEADER}
{{input}}{EOD}{ASSISTANT_HEADER}"""

Llama3QAPrompt = f"""{BEGIN_OF_TEXT}{SYSTEM_HEADER}
Use the following conversation history and pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. You do not repeat yourself. You avoid bulleted list or emojis.{EOD}{{chat_history}}{USER_HEADER}
Context: {{context}}
{{question}}{EOD}{ASSISTANT_HEADER}"""

Llama3CondensedQAPrompt = f"""{BEGIN_OF_TEXT}{SYSTEM_HEADER}
Given the following conversation and the question at the end, rephrase the follow up input to be a standalone question, in the same language as the follow up input. You do not repeat yourself. You avoid bulleted list or emojis.{EOD}{{chat_history}}{USER_HEADER}
{{question}}{EOD}{ASSISTANT_HEADER}"""


Llama3PromptTemplate = PromptTemplate.from_template(Llama3Prompt)
Llama3QAPromptTemplate = PromptTemplate.from_template(Llama3QAPrompt)
Llama3CondensedQAPromptTemplate = PromptTemplate.from_template(Llama3CondensedQAPrompt)


class Llama3ConversationBufferMemory(ConversationBufferMemory):
    @property
    def buffer_as_str(self) -> str:
        return self.get_buffer_string()

    def get_buffer_string(self) -> str:
        # See https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3/
        string_messages = []
        for m in self.chat_memory.messages:
            if isinstance(m, HumanMessage):
                message = f"""{USER_HEADER}
{m.content}{EOD}"""

            elif isinstance(m, AIMessage):
                message = f"""{ASSISTANT_HEADER}
{m.content}{EOD}"""
            else:
                raise ValueError(f"Got unsupported message type: {m}")
            string_messages.append(message)

        return "".join(string_messages)