
Commit

Merge branch 'aws-samples:main' into main
ystoneman authored May 9, 2024
2 parents 8fad532 + 47d7499 commit 1a0cd1b
Showing 6 changed files with 136 additions and 2 deletions.
5 changes: 4 additions & 1 deletion cli/magic-config.ts
@@ -630,7 +630,10 @@ async function processCreateOptions(options: any): Promise<void> {
           options.kendraExternal.length > 0) ||
         false,
       skip(): boolean {
-        return !(this as any).state.answers.enableRag;
+        if (!(this as any).state.answers.enableRag){
+          return true;
+        }
+        return !(this as any).state.answers.ragsToEnable.includes("kendra");
       },
     },
   ];
@@ -4,3 +4,4 @@
 from .cohere import *
 from .llama2_chat import *
 from .mistral import *
+from .llama3 import *
@@ -0,0 +1,67 @@
import genai_core.clients

# from langchain.llms import Bedrock (pending https://github.com/langchain-ai/langchain/issues/13316)
from .base import Bedrock

from langchain.prompts.prompt import PromptTemplate


from ..shared.meta.llama3_instruct import (
    Llama3PromptTemplate,
    Llama3QAPromptTemplate,
    Llama3CondensedQAPromptTemplate,
)
from ..shared.meta.llama3_instruct import Llama3ConversationBufferMemory

from ..base import ModelAdapter
from genai_core.registry import registry


class BedrockMetaLLama3InstructAdapter(ModelAdapter):
    def __init__(self, model_id, *args, **kwargs):
        self.model_id = model_id

        super().__init__(*args, **kwargs)

    def get_memory(self, output_key=None, return_messages=False):
        return Llama3ConversationBufferMemory(
            memory_key="chat_history",
            chat_memory=self.chat_history,
            return_messages=return_messages,
            output_key=output_key,
        )

    def get_llm(self, model_kwargs={}):
        bedrock = genai_core.clients.get_bedrock_client()

        params = {}
        if "temperature" in model_kwargs:
            params["temperature"] = model_kwargs["temperature"]
        if "topP" in model_kwargs:
            params["top_p"] = model_kwargs["topP"]
        if "maxTokens" in model_kwargs:
            params["max_gen_len"] = model_kwargs["maxTokens"]

        return Bedrock(
            client=bedrock,
            model_id=self.model_id,
            model_kwargs=params,
            streaming=model_kwargs.get("streaming", False),
            callbacks=[self.callback_handler],
        )

    def get_prompt(self):
        return Llama3PromptTemplate

    def get_qa_prompt(self):
        return Llama3QAPromptTemplate

    def get_condense_question_prompt(self):
        return Llama3CondensedQAPromptTemplate


# Register the adapter
registry.register(
    r"^bedrock.meta.llama3-.*-instruct.*",
    BedrockMetaLLama3InstructAdapter,
)
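
The registration key is a regular expression, so any Bedrock Llama 3 Instruct variant resolves to this adapter. A minimal sketch of how that pattern behaves, using illustrative Bedrock-style model IDs that are not taken from this diff:

import re

LLAMA3_PATTERN = r"^bedrock.meta.llama3-.*-instruct.*"  # same pattern registered above

# Hypothetical registry keys in the "bedrock.<model_id>" form the regex implies.
candidates = [
    "bedrock.meta.llama3-8b-instruct-v1:0",   # matches
    "bedrock.meta.llama3-70b-instruct-v1:0",  # matches
    "bedrock.meta.llama2-13b-chat-v1",        # does not match
]

for model_id in candidates:
    print(model_id, bool(re.match(LLAMA3_PATTERN, model_id)))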
@@ -1 +1,2 @@
 from .llama2_chat import *
+from .llama3_instruct import *
@@ -0,0 +1,61 @@
import json

from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

BEGIN_OF_TEXT = "<|begin_of_text|>"
SYSTEM_HEADER = "<|start_header_id|>system<|end_header_id|>"
USER_HEADER = "<|start_header_id|>user<|end_header_id|>"
ASSISTANT_HEADER = "<|start_header_id|>assistant<|end_header_id|>"
EOD = "<|eot_id|>"

Llama3Prompt = f"""{BEGIN_OF_TEXT}{SYSTEM_HEADER}
You are a helpful assistant that provides concise answers to user questions, using as few sentences as possible and at most 3 sentences. You do not repeat yourself. You avoid bulleted lists and emojis.{EOD}{{chat_history}}{USER_HEADER}
{{input}}{EOD}{ASSISTANT_HEADER}"""

Llama3QAPrompt = f"""{BEGIN_OF_TEXT}{SYSTEM_HEADER}
Use the following conversation history and pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know; don't try to make up an answer. You do not repeat yourself. You avoid bulleted lists and emojis.{EOD}{{chat_history}}{USER_HEADER}
Context: {{context}}
{{question}}{EOD}{ASSISTANT_HEADER}"""

Llama3CondensedQAPrompt = f"""{BEGIN_OF_TEXT}{SYSTEM_HEADER}
Given the following conversation and the question at the end, rephrase the follow-up input as a standalone question, in the same language as the follow-up input. You do not repeat yourself. You avoid bulleted lists and emojis.{EOD}{{chat_history}}{USER_HEADER}
{{question}}{EOD}{ASSISTANT_HEADER}"""


Llama3PromptTemplate = PromptTemplate.from_template(Llama3Prompt)
Llama3QAPromptTemplate = PromptTemplate.from_template(Llama3QAPrompt)
Llama3CondensedQAPromptTemplate = PromptTemplate.from_template(Llama3CondensedQAPrompt)


class Llama3ConversationBufferMemory(ConversationBufferMemory):
    @property
    def buffer_as_str(self) -> str:
        return self.get_buffer_string()

    def get_buffer_string(self) -> str:
        # See https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3/
        string_messages = []
        for m in self.chat_memory.messages:
            if isinstance(m, HumanMessage):
                message = f"""{USER_HEADER}
{m.content}{EOD}"""
            elif isinstance(m, AIMessage):
                message = f"""{ASSISTANT_HEADER}
{m.content}{EOD}"""
            else:
                raise ValueError(f"Got unsupported message type: {m}")
            string_messages.append(message)

        return "".join(string_messages)
3 changes: 2 additions & 1 deletion lib/shared/layers/python-sdk/python/genai_core/documents.py
@@ -485,6 +485,7 @@ def _process_document(

     try:
         urls_to_crawl = genai_core.websites.extract_urls_from_sitemap(path)
+        limit = min(limit, len(urls_to_crawl))
 
         if len(urls_to_crawl) == 0:
             set_status(workspace_id, document_id, "error")
@@ -512,7 +513,7 @@ def _process_document(
"priority_queue": priority_queue,
"processed_urls": [],
"follow_links": follow_links,
"limit": min(limit, len(urls_to_crawl)),
"limit": limit,
"done": False,
},
cls=genai_core.utils.json.CustomEncoder,
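
The crawl limit is now clamped once, right after the sitemap is expanded, so the state record above reuses the already-clamped value instead of recomputing it. A tiny sketch of the clamping behavior with hypothetical numbers (not values from the repository):

urls_to_crawl = ["https://example.com/a", "https://example.com/b"]  # hypothetical sitemap result
limit = 50  # hypothetical requested crawl limit

# Clamp once where the URLs are extracted ...
limit = min(limit, len(urls_to_crawl))

# ... so every later consumer (the crawler state record, for example) sees the same value.
assert limit == 2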
