Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Small fixes to the CLI interface of the gpte and bench applications #1157

Merged
merged 3 commits into from
May 22, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 5 additions & 3 deletions gpt_engineer/applications/cli/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,8 @@
import typer

from dotenv import load_dotenv
from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache
from termcolor import colored

from gpt_engineer.applications.cli.cli_agent import CliAgent
Expand All @@ -60,7 +60,9 @@
from gpt_engineer.core.prompt import Prompt
from gpt_engineer.tools.custom_steps import clarified_gen, lite_gen, self_heal

app = typer.Typer() # creates a CLI app
app = typer.Typer(
context_settings={"help_option_names": ["-h", "--help"]}
) # creates a CLI app


def load_env_if_needed():
Expand Down Expand Up @@ -247,7 +249,7 @@ def prompt_yesno() -> bool:
)
def main(
project_path: str = typer.Argument(".", help="path"),
model: str = typer.Argument("gpt-4o", help="model id string"),
model: str = typer.Option("gpt-4o", "--model", "-m", help="model id string"),
temperature: float = typer.Option(
0.1,
"--temperature",
Expand Down
25 changes: 20 additions & 5 deletions gpt_engineer/benchmark/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,20 +21,23 @@
"""
import importlib
import os.path
import sys

from typing import Annotated, Optional

import typer

from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache

from gpt_engineer.applications.cli.main import load_env_if_needed
from gpt_engineer.benchmark.bench_config import BenchConfig
from gpt_engineer.benchmark.benchmarks.load import get_benchmark
from gpt_engineer.benchmark.run import export_yaml_results, print_results, run

app = typer.Typer() # creates a CLI app
app = typer.Typer(
context_settings={"help_option_names": ["-h", "--help"]}
) # creates a CLI app


def get_agent(path):
Expand All @@ -52,6 +55,7 @@ def get_agent(path):
An instance of the imported default configuration agent.
"""
# Dynamically import the python module at path
sys.path.append(os.path.dirname(path))
agent_module = importlib.import_module(path.replace("/", ".").replace(".py", ""))
return agent_module.default_config_agent()

Expand Down Expand Up @@ -79,8 +83,16 @@ def main(
typer.Option(help="print results for each task", show_default=False),
] = None,
verbose: Annotated[
bool, typer.Option(help="print results for each task", show_default=False)
Optional[bool],
typer.Option(help="print results for each task", show_default=False),
] = False,
use_cache: Annotated[
Optional[bool],
typer.Option(
help="Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.",
show_default=False,
),
] = True,
):
"""
The main function that runs the specified benchmarks with the given agent and outputs the results to the console.
Expand All @@ -93,13 +105,16 @@ def main(
Configuration file for choosing which benchmark problems to run. See default config for more details.
yaml_output: Optional[str], default=None
Pass a path to a yaml file to have results written to file.
verbose : bool, default=False
verbose : Optional[bool], default=False
A flag to indicate whether to print results for each task.
use_cache : Optional[bool], default=True
Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.
Returns
-------
None
"""
set_llm_cache(SQLiteCache(database_path=".langchain.db"))
if use_cache:
set_llm_cache(SQLiteCache(database_path=".langchain.db"))
load_env_if_needed()
config = BenchConfig.from_toml(bench_config)
print("using config file: " + bench_config)
Expand Down
Loading