From 8f21fda36079e2f014e98d276972b3b143c0875a Mon Sep 17 00:00:00 2001
From: Axel Theorell
Date: Wed, 22 May 2024 19:31:48 +0200
Subject: [PATCH 1/3] Small CLI fixes to gpte and bench

---
 gpt_engineer/applications/cli/main.py |  6 ++++--
 gpt_engineer/benchmark/__main__.py    | 26 ++++++++++++++++++++------
 2 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/gpt_engineer/applications/cli/main.py b/gpt_engineer/applications/cli/main.py
index 7b4651528f..6efc1c2ec1 100644
--- a/gpt_engineer/applications/cli/main.py
+++ b/gpt_engineer/applications/cli/main.py
@@ -36,7 +36,7 @@
 
 import typer
 from dotenv import load_dotenv
-from langchain.cache import SQLiteCache
+from langchain_community.cache import SQLiteCache
 from langchain.globals import set_llm_cache
 from termcolor import colored
 
@@ -60,7 +60,9 @@
 from gpt_engineer.core.prompt import Prompt
 from gpt_engineer.tools.custom_steps import clarified_gen, lite_gen, self_heal
 
-app = typer.Typer()  # creates a CLI app
+app = typer.Typer(
+    context_settings={"help_option_names": ["-h", "--help"]}
+)  # creates a CLI app
 
 
 def load_env_if_needed():
diff --git a/gpt_engineer/benchmark/__main__.py b/gpt_engineer/benchmark/__main__.py
index 683ea00f0a..2c4bbdb260 100644
--- a/gpt_engineer/benchmark/__main__.py
+++ b/gpt_engineer/benchmark/__main__.py
@@ -21,12 +21,12 @@
 """
 
 import importlib
 import os.path
-
+import sys
 from typing import Annotated, Optional
 
 import typer
-from langchain.cache import SQLiteCache
+from langchain_community.cache import SQLiteCache
 from langchain.globals import set_llm_cache
 
 from gpt_engineer.applications.cli.main import load_env_if_needed
@@ -34,7 +34,9 @@
 from gpt_engineer.benchmark.benchmarks.load import get_benchmark
 from gpt_engineer.benchmark.run import export_yaml_results, print_results, run
 
-app = typer.Typer()  # creates a CLI app
+app = typer.Typer(
+    context_settings={"help_option_names": ["-h", "--help"]}
+)  # creates a CLI app
 
 
 def get_agent(path):
@@ -52,6 +54,7 @@
         An instance of the imported default configuration agent.
     """
     # Dynamically import the python module at path
+    sys.path.append(os.path.dirname(path))
     agent_module = importlib.import_module(path.replace("/", ".").replace(".py", ""))
     return agent_module.default_config_agent()
 
@@ -79,8 +82,16 @@ def main(
         typer.Option(help="print results for each task", show_default=False),
     ] = None,
     verbose: Annotated[
-        bool, typer.Option(help="print results for each task", show_default=False)
+        Optional[bool],
+        typer.Option(help="print results for each task", show_default=False),
     ] = False,
+    use_cache: Annotated[
+        Optional[bool],
+        typer.Option(
+            help="Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.",
+            show_default=False,
+        ),
+    ] = True,
 ):
     """
     The main function that runs the specified benchmarks with the given agent and outputs the results to the console.
@@ -93,13 +104,16 @@
         Configuration file for choosing which benchmark problems to run. See default config for more details.
     yaml_output: Optional[str], default=None
         Pass a path to a yaml file to have results written to file.
-    verbose : bool, default=False
+    verbose : Optional[bool], default=False
         A flag to indicate whether to print results for each task.
+    use_cache : Optional[bool], default=True
+        Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.
     Returns
     -------
     None
     """
-    set_llm_cache(SQLiteCache(database_path=".langchain.db"))
+    if use_cache:
+        set_llm_cache(SQLiteCache(database_path=".langchain.db"))
     load_env_if_needed()
     config = BenchConfig.from_toml(bench_config)
     print("using config file: " + bench_config)

From bdc7736821bbb1ef885b773b8970586b43de335f Mon Sep 17 00:00:00 2001
From: Axel Theorell
Date: Wed, 22 May 2024 19:32:29 +0200
Subject: [PATCH 2/3] ruff formatting

---
 gpt_engineer/applications/cli/main.py | 2 +-
 gpt_engineer/benchmark/__main__.py    | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/gpt_engineer/applications/cli/main.py b/gpt_engineer/applications/cli/main.py
index 6efc1c2ec1..157e02f26b 100644
--- a/gpt_engineer/applications/cli/main.py
+++ b/gpt_engineer/applications/cli/main.py
@@ -36,8 +36,8 @@
 
 import typer
 from dotenv import load_dotenv
-from langchain_community.cache import SQLiteCache
 from langchain.globals import set_llm_cache
+from langchain_community.cache import SQLiteCache
 from termcolor import colored
 
 from gpt_engineer.applications.cli.cli_agent import CliAgent
diff --git a/gpt_engineer/benchmark/__main__.py b/gpt_engineer/benchmark/__main__.py
index 2c4bbdb260..839fb87f41 100644
--- a/gpt_engineer/benchmark/__main__.py
+++ b/gpt_engineer/benchmark/__main__.py
@@ -22,12 +22,13 @@
 
 import importlib
 import os.path
 import sys
+
 from typing import Annotated, Optional
 
 import typer
-from langchain_community.cache import SQLiteCache
 from langchain.globals import set_llm_cache
+from langchain_community.cache import SQLiteCache
 
 from gpt_engineer.applications.cli.main import load_env_if_needed
 from gpt_engineer.benchmark.bench_config import BenchConfig

From 15b559edc874c51d10385cbe5485ee4474b34c6b Mon Sep 17 00:00:00 2001
From: Axel Theorell
Date: Wed, 22 May 2024 19:38:03 +0200
Subject: [PATCH 3/3] "--model" now option and not argument

---
 gpt_engineer/applications/cli/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gpt_engineer/applications/cli/main.py b/gpt_engineer/applications/cli/main.py
index 157e02f26b..2b8d1f5d52 100644
--- a/gpt_engineer/applications/cli/main.py
+++ b/gpt_engineer/applications/cli/main.py
@@ -249,7 +249,7 @@ def prompt_yesno() -> bool:
 )
 def main(
     project_path: str = typer.Argument(".", help="path"),
-    model: str = typer.Argument("gpt-4o", help="model id string"),
+    model: str = typer.Option("gpt-4o", "--model", "-m", help="model id string"),
     temperature: float = typer.Option(
         0.1,
         "--temperature",
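
Illustrative note, not part of the patch series: the standalone Typer sketch below mirrors the CLI patterns these three commits introduce -- "-h" accepted as a help alias via context_settings, a boolean --use-cache/--no-use-cache flag guarding the SQLite LLM cache, and the model passed as the "--model"/"-m" option rather than a positional argument. It is a minimal sketch, not gpt-engineer code; the command name, echoed output, and help texts are assumptions for illustration only.

import typer
from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache

app = typer.Typer(context_settings={"help_option_names": ["-h", "--help"]})


@app.command()
def main(
    project_path: str = typer.Argument(".", help="path"),
    model: str = typer.Option("gpt-4o", "--model", "-m", help="model id string"),
    use_cache: bool = typer.Option(
        True, help="Cache LLM responses in a local SQLite database."
    ),
):
    # Same guard as the patched benchmark entry point: only install the
    # SQLite-backed LLM cache when caching is requested.
    if use_cache:
        set_llm_cache(SQLiteCache(database_path=".langchain.db"))
    typer.echo(f"project={project_path} model={model} cache={use_cache}")


if __name__ == "__main__":
    app()

Invoked as, for example, "python sketch.py -h" or "python sketch.py . --model gpt-4o --no-use-cache" (file name assumed); Typer derives the --use-cache/--no-use-cache flag pair automatically from the boolean option's default.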