Skip to content
This repository has been archived by the owner on Sep 12, 2024. It is now read-only.

Commit

Permalink
major updates on cli module
Browse files Browse the repository at this point in the history
  • Loading branch information
SeeknnDestroy authored and fcakyon committed Jan 23, 2024
1 parent 1e7a268 commit 0855bd5
Show file tree
Hide file tree
Showing 2 changed files with 52 additions and 30 deletions.
54 changes: 34 additions & 20 deletions autollm/serve/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,13 @@

from autollm.auto.llm import AutoLiteLLM
from autollm.auto.query_engine import AutoQueryEngine
from autollm.serve.prompts import LLM_BUILDER_SYSTEM_PROMPT
from autollm.serve.llm_utils import create_custom_llm
from autollm.utils.document_reading import read_files_as_documents

llama_index.set_global_handler("simple")

llm_builder = AutoLiteLLM.from_defaults(system_prompt=LLM_BUILDER_SYSTEM_PROMPT)


def configure_app(
openai_api_key, palm_api_key, uploaded_files, webpage_input, what_to_make_area, config_file, emoji,
name, description, instruction):
def create_app(openai_api_key, palm_api_key, what_to_make_area, uploaded_files, webpage_input, config_file):
global query_engine
progress = gr.Progress()

Expand All @@ -26,6 +22,10 @@ def configure_app(
progress(0.2, desc="Reading files...")
file_documents = read_files_as_documents(input_files=uploaded_files)

progress(0.4, desc="Updating LLM...")
custom_llm = create_custom_llm(user_prompt=what_to_make_area, config=config_file)
emoji, name, description, instruction = update_configurations(custom_llm)

progress(0.8, desc="Configuring app..")
query_engine = AutoQueryEngine.from_defaults(
documents=file_documents,
Expand All @@ -36,8 +36,22 @@ def configure_app(

# Complete progress
progress(1.0, desc="Completed") # Complete progress bar
create_preview_output = gr.Textbox("App preview created on the right screen.")

return create_preview_output, emoji, name, description, instruction


def update_configurations(custom_llm):
    """Expose the generated LLM's metadata as Gradio textbox components.

    Pulls the emoji, name, description, and instructions off the freshly
    created custom LLM object and wraps each value in a ``gr.Textbox`` so
    the UI fields on the configuration panel can be refreshed in one call.
    """
    metadata = (
        custom_llm.emoji,
        custom_llm.name,
        custom_llm.description,
        custom_llm.instructions,
    )
    # Return a 4-tuple of textboxes: (emoji, name, description, instruction).
    return tuple(gr.Textbox(value) for value in metadata)


return gr.Textbox("App preview created on the right screen.")
def update_app():
    """Placeholder hook for the 'Update Preview' button; not yet implemented.

    Wired as the click handler for ``update_preview_button`` — currently a
    no-op that returns ``None``.
    """


def predict(message, history):
Expand Down Expand Up @@ -89,15 +103,15 @@ def predict(message, history):
with gr.Accordion(label="Load config file", open=False):
config_file_upload = gr.File(
label="Configurations of LLM, Vector Store..", file_count="single")
emoji_input = gr.Textbox(label="Emoji")
name_input = gr.Textbox(label="Name")
description_input = gr.Textbox(label="Description")
instruction_input = gr.TextArea(label="Instructions")
emoji = gr.Textbox(label="Emoji")
name = gr.Textbox(label="Name")
description = gr.Textbox(label="Description")
instruction = gr.TextArea(label="Instructions")
with gr.Row():
with gr.Column(scale=1, min_width=10):
placeholder = gr.Button(visible=False, interactive=False)
with gr.Column(scale=1, min_width=100):
create_preview_button_2 = gr.Button("Create Preview", variant="primary")
update_preview_button = gr.Button("Update Preview", variant="primary")
configure_output = gr.Textbox(label="👆 Click `Create Preview` to see preview of the LLM app")
with gr.Tab("Export"):
# Controls for 'Export' tab
Expand All @@ -121,18 +135,18 @@ def predict(message, history):
chat_interface = gr.ChatInterface(predict, chatbot=chatbot)

create_preview_button.click(
configure_app,
create_app,
inputs=[
openai_api_key_input, palm_api_key_input, uploaded_files, webpage_input, what_to_make_area,
config_file_upload, emoji_input, name_input, description_input, instruction_input
openai_api_key_input, palm_api_key_input, what_to_make_area, uploaded_files, webpage_input,
config_file_upload
],
outputs=[create_preview_output])
outputs=[create_preview_output, emoji, name, description, instruction])

create_preview_button_2.click(
configure_app,
update_preview_button.click(
update_app,
inputs=[
openai_api_key_input, palm_api_key_input, uploaded_files, webpage_input, what_to_make_area,
config_file_upload, emoji_input, name_input, description_input, instruction_input
openai_api_key_input, palm_api_key_input, what_to_make_area, uploaded_files, webpage_input,
config_file_upload, emoji, name, description, instruction
],
outputs=[configure_output],
scroll_to_output=True)
Expand Down
28 changes: 18 additions & 10 deletions autollm/serve/llm_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,13 @@

from autollm import AutoLiteLLM

# Fallback LLM settings, used by create_custom_llm when the supplied config
# omits the corresponding 'llm_model' / 'llm_max_tokens' / 'llm_temperature' keys.
DEFAULT_LLM_MODEL = "azure/gpt-4-1106"
DEFAULT_LLM_MAX_TOKENS = 1024
DEFAULT_LLM_TEMPERATURE = 0.1


class CustomLLM(BaseModel):
"""Data model for custom LLM creation."""
"""Data model for custom LLM creation from user prompt."""

emoji: str = Field(
...,
Expand Down Expand Up @@ -52,17 +56,18 @@ class CustomLLM(BaseModel):


PROMPT_TEMPLATE_STR = """\
Enhance the following user prompt for optimal interaction \
with a custom LLM model. Ensure the revised prompt maintains the \
original intent, is clear and detailed, and is adapted to the \
specific context and task mentioned in the user input.
User Input: {user_prompt}
Your task is to revise the user prompt and create a JSON object \
in the format of the CustomLLM data model. The JSON object will \
be used to create a custom LLM model. Ensure the revised prompt \
maintains the original intent, is clear and detailed, and is \
adapted to the specific context and task mentioned in the user input.
1. Analyze the basic prompt to understand its primary purpose and context.
2. Refine the prompt to be clear, detailed, specific, and tailored to the context and task.
3. Retain the core elements and intent of the original prompt.
4. Provide an enhanced version of the prompt, ensuring it is optimized for a LLM model interaction.
User prompt: {user_prompt}
"""


Expand All @@ -71,9 +76,12 @@ def create_custom_llm(user_prompt: str, config: Optional[Any] = None) -> CustomL
if not user_prompt:
raise ValueError("Please fill in the area of 'What would you like to make?'")

llm_model = config.get('llm_model', 'azure/gpt-4-1106')
llm_max_tokens = config.get('llm_max_tokens', 1024)
llm_temperature = config.get('llm_temperature', 0.1)
if not config:
config = {}

llm_model = config.get('llm_model', DEFAULT_LLM_MODEL)
llm_max_tokens = config.get('llm_max_tokens', DEFAULT_LLM_MAX_TOKENS)
llm_temperature = config.get('llm_temperature', DEFAULT_LLM_TEMPERATURE)
llm_api_base = config.get('llm_api_base', None)

llm = AutoLiteLLM.from_defaults(
Expand Down

0 comments on commit 0855bd5

Please sign in to comment.