diff --git a/interpreter_1/cli.py b/interpreter_1/cli.py
index 14b5c9668..01b8fb439 100644
--- a/interpreter_1/cli.py
+++ b/interpreter_1/cli.py
@@ -158,6 +158,7 @@ def _profile_to_arg_params(profile: Profile) -> Dict[str, Dict[str, Any]]:
             "flags": ["--profile"],
             "default": profile.profile_path,
             "help": "Path to profile configuration",
+            "metavar": "PATH",
         },
         "debug": {
             "flags": ["--debug", "-d"],
@@ -240,11 +241,14 @@ def parse_args():
     parser = argparse.ArgumentParser(add_help=False)
 
-    parser.add_argument("--help", "-h", action="store_true", help=argparse.SUPPRESS)
-    parser.add_argument("--version", action="store_true", help=argparse.SUPPRESS)
+    parser.add_argument("--help", "-h", action="store_true", help="Show help")
+    parser.add_argument("--version", action="store_true", help="Show version")
     parser.add_argument(
         "--profiles", action="store_true", help="Open profiles directory"
     )
+    parser.add_argument(
+        "--save", action="store", metavar="PATH", help="Save profile to path"
+    )
 
     arg_params = _profile_to_arg_params(profile)
     for param in arg_params.values():
@@ -271,6 +275,14 @@ def parse_args():
         if key in args and args[key] is None:
             args[key] = value
 
+    if args["save"]:
+        # Apply CLI args to profile
+        for key, value in args.items():
+            if key in vars(profile) and value is not None:
+                setattr(profile, key, value)
+        profile.save(args["save"])
+        sys.exit(0)
+
     return args
diff --git a/interpreter_1/misc/help.py b/interpreter_1/misc/help.py
index 13005ceba..3c6ddc46d 100644
--- a/interpreter_1/misc/help.py
+++ b/interpreter_1/misc/help.py
@@ -80,29 +80,31 @@ def help_message():
 A modern command-line assistant.
 
 options:
-  --model              model to use for completion
-  --provider           api provider (e.g. openai, anthropic)
-  --api-base           base url for api requests
-  --api-key            api key for authentication
-  --api-version        api version to use
-  --temperature        sampling temperature (default: 0)
-
-  --tools              comma-separated tools: interpreter,editor,gui
-  --allowed-commands   commands the model can execute
-  --allowed-paths      paths the model can access
-  --no-tool-calling    disable tool calling (instead parse markdown code)
-  --auto-run, -y       auto-run suggested commands
-  --interactive        force interactive mode (true if sys.stdin.isatty())
-  --no-interactive     disable interactive mode
-
-  --instructions       additional instructions in system message
-  --input              pre-fill first user message
-  --system-message     override default system message
-  --max-turns          maximum conversation turns (-1 for unlimited)
-
-  --profile            load settings from config file
-  --profiles           open profiles directory
-  --serve              start openai-compatible server
+  --models             show available models
+  --model              model to use for completion
+  --provider           api provider (e.g. openai, anthropic)
+  --api-base           base url for api requests
+  --api-key            api key for authentication
+  --api-version        api version to use
+  --temperature        sampling temperature (default: 0)
+
+  --tools              comma-separated tools: interpreter,editor,gui
+  --allowed-commands   commands the model can execute
+  --allowed-paths      paths the model can access
+  --no-tool-calling    disable tool calling (instead parse markdown code)
+  --auto-run, -y       auto-run suggested commands
+  --interactive        force interactive mode (true if sys.stdin.isatty())
+  --no-interactive     disable interactive mode
+
+  --instructions       additional instructions in system message
+  --input              pre-fill first user message
+  --system-message     override default system message
+  --max-turns          maximum conversation turns (-1 for unlimited)
+
+  --profile            load settings from config file or url
+  --save               save settings to config file
+  --profiles           open profiles directory
+  --serve              start openai-compatible server
 
 example: i want a venv
 example: interpreter --model ollama/llama3.2 --serve
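
Usage sketch for the new flag (not part of the diff; the profile path below is illustrative, and Profile.save determines the on-disk format). Per the parse_args change, --save applies any other CLI overrides to the profile, writes it to the given path, and exits without starting a session:

    # persist the current CLI overrides as a reusable profile, then exit
    interpreter --model ollama/llama3.2 --temperature 0 --save ~/my-profile

    # a later session loads the saved settings back
    interpreter --profile ~/my-profile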