Skip to content

Commit

Permalink
Merge pull request #609 from InterwebAlchemy/feature/more-configs
Browse files Browse the repository at this point in the history
feat: add support for loading different config.yaml files
  • Loading branch information
KillianLucas authored Oct 11, 2023
2 parents 492a62e + b0fb3d7 commit 8ec9492
Show file tree
Hide file tree
Showing 9 changed files with 231 additions and 72 deletions.
51 changes: 51 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -262,6 +262,57 @@ Run the following command to open the configuration file:
interpreter --config
```

#### Multiple Configuration Files

Open Interpreter supports multiple `config.yaml` files, allowing you to easily switch between configurations via the `--config_file` argument.

**Note**: `--config_file` accepts either a file name or a file path. File names will use the default configuration directory, while file paths will use the specified path.

To create or edit a new configuration, run:

```
interpreter --config --config_file $config_path
```

To have Open Interpreter load a specific configuration file, run:

```
interpreter --config_file $config_path
```

**Note**: Replace `$config_path` with the name of or path to your configuration file.

##### CLI Example

1. Create a new `config.turbo.yaml` file
```
interpreter --config --config_file config.turbo.yaml
```
2. Edit the `config.turbo.yaml` file to set `model` to `gpt-3.5-turbo`
3. Run Open Interpreter with the `config.turbo.yaml` configuration
```
interpreter --config_file config.turbo.yaml
```

##### Python Example

You can also load configuration files when calling Open Interpreter from Python scripts:

```python
import os
import interpreter

# Build an absolute path to the config file located next to this script,
# so the example works regardless of the current working directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(current_dir, "config.test.yaml")

# Merge the custom configuration into the interpreter's settings.
interpreter.extend_config(config_path=config_path)

message = "What operating system are we on?"

for chunk in interpreter.chat(message, display=False, stream=True):
    print(chunk)
```

## Safety Notice

Since generated code is executed in your local environment, it can interact with your files and system settings, potentially leading to unexpected outcomes like data loss or security risks.
Expand Down
130 changes: 90 additions & 40 deletions interpreter/cli/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,111 +3,147 @@
import os
import platform
import pkg_resources
import appdirs
from ..utils.display_markdown_message import display_markdown_message
from ..utils.get_config import get_config_path
from ..terminal_interface.conversation_navigator import conversation_navigator

arguments = [
{
"name": "system_message",
"nickname": "s",
"help_text": "prompt / custom instructions for the language model",
"type": str
},
{
"name": "local",
"nickname": "l",
"help_text": "run in local mode",
"type": bool
"type": str,
},
{"name": "local", "nickname": "l", "help_text": "run in local mode", "type": bool},
{
"name": "auto_run",
"nickname": "y",
"help_text": "automatically run the interpreter",
"type": bool
"type": bool,
},
{
"name": "debug_mode",
"nickname": "d",
"help_text": "run in debug mode",
"type": bool
"type": bool,
},
{
"name": "model",
"nickname": "m",
"help_text": "model to use for the language model",
"type": str
"type": str,
},
{
"name": "temperature",
"nickname": "t",
"help_text": "optional temperature setting for the language model",
"type": float
"type": float,
},
{
"name": "context_window",
"nickname": "c",
"help_text": "optional context window size for the language model",
"type": int
"type": int,
},
{
"name": "max_tokens",
"nickname": "x",
"help_text": "optional maximum number of tokens for the language model",
"type": int
"type": int,
},
{
"name": "max_budget",
"nickname": "b",
"help_text": "optionally set the max budget (in USD) for your llm calls",
"type": float
"type": float,
},
{
"name": "api_base",
"nickname": "ab",
"help_text": "optionally set the API base URL for your llm calls (this will override environment variables)",
"type": str
"type": str,
},
{
"name": "api_key",
"nickname": "ak",
"help_text": "optionally set the API key for your llm calls (this will override environment variables)",
"type": str
"type": str,
},
{
"name": "safe_mode",
"nickname": "safe",
"help_text": "optionally enable safety mechanisms like code scanning; valid options are off, ask, and auto",
"type": str,
"choices": ["off", "ask", "auto"]
"choices": ["off", "ask", "auto"],
},
{
"name": "gguf_quality",
"nickname": "q",
"help_text": "(experimental) value from 0-1 which will select the gguf quality/quantization level. lower = smaller, faster, more quantized",
"type": float,
}
},
{
"name": "config_file",
"nickname": "cf",
"help_text": "optionally set a custom config file to use",
"type": str,
},
]

def cli(interpreter):

def cli(interpreter):
parser = argparse.ArgumentParser(description="Open Interpreter")

# Add arguments
for arg in arguments:
if arg["type"] == bool:
parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action='store_true', default=None)
parser.add_argument(
f'-{arg["nickname"]}',
f'--{arg["name"]}',
dest=arg["name"],
help=arg["help_text"],
action="store_true",
default=None,
)
else:
choices = arg["choices"] if "choices" in arg else None
default = arg["default"] if "default" in arg else None

parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default)
parser.add_argument(
f'-{arg["nickname"]}',
f'--{arg["name"]}',
dest=arg["name"],
help=arg["help_text"],
type=arg["type"],
choices=choices,
default=default,
)

# Add special arguments
parser.add_argument('--config', dest='config', action='store_true', help='open config.yaml file in text editor')
parser.add_argument('--conversations', dest='conversations', action='store_true', help='list conversations to resume')
parser.add_argument('-f', '--fast', dest='fast', action='store_true', help='(depracated) runs `interpreter --model gpt-3.5-turbo`')
parser.add_argument('--version', dest='version', action='store_true', help="get Open Interpreter's version number")
parser.add_argument(
"--config",
dest="config",
action="store_true",
help="open config.yaml file in text editor",
)
parser.add_argument(
"--conversations",
dest="conversations",
action="store_true",
help="list conversations to resume",
)
parser.add_argument(
"-f",
"--fast",
dest="fast",
action="store_true",
help="(depracated) runs `interpreter --model gpt-3.5-turbo`",
)
parser.add_argument(
"--version",
dest="version",
action="store_true",
help="get Open Interpreter's version number",
)

# TODO: Implement model explorer
# parser.add_argument('--models', dest='models', action='store_true', help='list avaliable models')
Expand All @@ -117,21 +153,27 @@ def cli(interpreter):
# This should be pushed into an open_config.py util
# If --config is used, open the config.yaml file in the Open Interpreter folder of the user's config dir
if args.config:
config_dir = appdirs.user_config_dir("Open Interpreter")
config_path = os.path.join(config_dir, 'config.yaml')
print(f"Opening `{config_path}`...")
if args.config_file:
config_file = get_config_path(args.config_file)
else:
config_file = get_config_path()

print(f"Opening `{config_file}`...")

# Use the default system editor to open the file
if platform.system() == 'Windows':
os.startfile(config_path) # This will open the file with the default application, e.g., Notepad
if platform.system() == "Windows":
os.startfile(
config_file
) # This will open the file with the default application, e.g., Notepad
else:
try:
# Try using xdg-open on non-Windows platforms
subprocess.call(['xdg-open', config_path])
subprocess.call(["xdg-open", config_file])
except FileNotFoundError:
# Fallback to using 'open' on macOS if 'xdg-open' is not available
subprocess.call(['open', config_path])
subprocess.call(["open", config_file])
return

# TODO Implement model explorer
"""
# If --models is used, list models
Expand All @@ -144,7 +186,13 @@ def cli(interpreter):
for attr_name, attr_value in vars(args).items():
# Ignore things that aren't possible attributes on interpreter
if attr_value is not None and hasattr(interpreter, attr_name):
setattr(interpreter, attr_name, attr_value)
# If the user has provided a config file, load it and extend interpreter's configuration
if attr_name == "config_file":
user_config = get_config_path(attr_value)
interpreter.config_file = user_config
interpreter.extend_config(config_path=user_config)
else:
setattr(interpreter, attr_name, attr_value)

# if safe_mode and auto_run are enabled, safe_mode disables auto_run
if interpreter.auto_run and not interpreter.safe_mode == "off":
Expand All @@ -159,16 +207,18 @@ def cli(interpreter):
if args.conversations:
conversation_navigator(interpreter)
return

if args.version:
version = pkg_resources.get_distribution("open-interpreter").version
print(f"Open Interpreter {version}")
return

# Depracated --fast
if args.fast:
# This will cause the terminal_interface to walk the user through setting up a local LLM
interpreter.model = "gpt-3.5-turbo"
print("`interpreter --fast` is depracated and will be removed in the next version. Please use `interpreter --model gpt-3.5-turbo`")
print(
"`interpreter --fast` is depracated and will be removed in the next version. Please use `interpreter --model gpt-3.5-turbo`"
)

interpreter.chat()
interpreter.chat()
17 changes: 13 additions & 4 deletions interpreter/core/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
"""
from interpreter.utils import display_markdown_message
from ..cli.cli import cli
from ..utils.get_config import get_config
from ..utils.get_config import get_config, user_config_path
from ..utils.local_storage_path import get_storage_path
from .respond import respond
from ..llm.setup_llm import setup_llm
from ..terminal_interface.terminal_interface import terminal_interface
Expand All @@ -25,6 +26,8 @@ def __init__(self):
self.messages = []
self._code_interpreters = {}

self.config_file = user_config_path

# Settings
self.local = False
self.auto_run = False
Expand All @@ -35,7 +38,7 @@ def __init__(self):
# Conversation history
self.conversation_history = True
self.conversation_filename = None
self.conversation_history_path = os.path.join(appdirs.user_data_dir("Open Interpreter"), "conversations")
self.conversation_history_path = get_storage_path("conversations")

# LLM settings
self.model = ""
Expand All @@ -50,15 +53,21 @@ def __init__(self):
self.gguf_quality = None

# Load config defaults
config = get_config()
self.__dict__.update(config)
self.extend_config(self.config_file)

# Check for update
if not self.local:
# This should actually be pushed into the utility
if check_for_update():
display_markdown_message("> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---")

def extend_config(self, config_path):
    """Load the configuration file at *config_path* and merge its values
    into this instance's attributes.

    Args:
        config_path: Path to (or name of) the configuration file to load.
    """
    if self.debug_mode:
        print(f'Extending configuration from `{config_path}`')

    # Each top-level key in the parsed config becomes (or overwrites) an
    # attribute on this instance.
    vars(self).update(get_config(config_path))

def chat(self, message=None, display=True, stream=False):
if stream:
return self._streaming_chat(message=message, display=display)
Expand Down
5 changes: 2 additions & 3 deletions interpreter/terminal_interface/conversation_navigator.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,18 @@
This file handles conversations.
"""

import appdirs
import inquirer
import subprocess
import platform
import os
import json
from .render_past_conversation import render_past_conversation
from ..utils.display_markdown_message import display_markdown_message
from ..utils.local_storage_path import get_storage_path

def conversation_navigator(interpreter):

data_dir = appdirs.user_data_dir("Open Interpreter")
conversations_dir = os.path.join(data_dir, "conversations")
conversations_dir = get_storage_path("conversations")

display_markdown_message(f"""> Conversations are stored in "`{conversations_dir}`".
Expand Down
Loading

0 comments on commit 8ec9492

Please sign in to comment.