Skip to content

Commit

Permalink
Litellm dev 12 19 2024 p2 (#7315)
Browse files Browse the repository at this point in the history
* fix(proxy_server.py): only update k,v pair if v is not empty/null

Fixes #6787

* test(test_router.py): cleanup duplicate calls

* test: add new test stream options drop params test

* test: update optional params / stream options test to test for vertex ai mistral route specifically

Addresses #7309

* fix(proxy_server.py): fix linting errors

* fix: fix linting errors
  • Loading branch information
krrishdholakia authored Dec 20, 2024
1 parent 3507621 commit 4c7a393
Show file tree
Hide file tree
Showing 5 changed files with 106 additions and 34 deletions.
8 changes: 6 additions & 2 deletions litellm/proxy/_new_secret_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,5 +20,9 @@ model_list:
api_version: "2024-05-01-preview"

litellm_settings:
success_callback: ["langsmith"]
num_retries: 0
default_team_settings:
- team_id: c91e32bb-0f2a-4aa1-86c4-307ca2e03ea3
success_callback: ["langfuse"]
failure_callback: ["langfuse"]
langfuse_public_key: my-fake-key
langfuse_secret: my-fake-secret
60 changes: 44 additions & 16 deletions litellm/proxy/proxy_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -1354,6 +1354,19 @@ def _check_for_os_environ_vars(
config[key] = get_secret(value)
return config

def _get_team_config(self, team_id: str, all_teams_config: List[Dict]) -> Dict:
team_config: dict = {}
for team in all_teams_config:
if "team_id" not in team:
raise Exception(f"team_id missing from team: {team}")
if team_id == team["team_id"]:
team_config = team
break
for k, v in team_config.items():
if isinstance(v, str) and v.startswith("os.environ/"):
team_config[k] = get_secret(v)
return team_config

async def load_team_config(self, team_id: str):
"""
- for a given team id
Expand All @@ -1366,18 +1379,11 @@ async def load_team_config(self, team_id: str):
## LITELLM MODULE SETTINGS (e.g. litellm.drop_params=True,..)
litellm_settings = config.get("litellm_settings", {})
all_teams_config = litellm_settings.get("default_team_settings", None)
team_config: dict = {}
if all_teams_config is None:
return team_config
for team in all_teams_config:
if "team_id" not in team:
raise Exception(f"team_id missing from team: {team}")
if team_id == team["team_id"]:
team_config = team
break
for k, v in team_config.items():
if isinstance(v, str) and v.startswith("os.environ/"):
team_config[k] = get_secret(v)
return {}
team_config = self._get_team_config(
team_id=team_id, all_teams_config=all_teams_config
)
return team_config

def _init_cache(
Expand Down Expand Up @@ -1452,9 +1458,12 @@ async def get_config(self, config_file_path: Optional[str] = None) -> dict:

config = self._check_for_os_environ_vars(config=config)

self.config = config
self.update_config_state(config=config)
return config

def update_config_state(self, config: dict):
    """Store the most recently loaded proxy config on the instance."""
    self.config = config

async def load_config( # noqa: PLR0915
self, router: Optional[litellm.Router], config_file_path: str
):
Expand Down Expand Up @@ -2272,6 +2281,24 @@ async def _update_general_settings(self, db_general_settings: Optional[Json]):
pass_through_endpoints=general_settings["pass_through_endpoints"]
)

def _update_config_fields(
self,
current_config: dict,
param_name: str,
db_param_value: Any,
) -> dict:
if isinstance(current_config[param_name], dict):
# if dict exists (e.g. litellm_settings),
# go through each key and value,
# and update if new value is not None/empty dict
for key, value in db_param_value.items():
if value:
current_config[param_name][key] = value
else:
current_config[param_name] = db_param_value

return current_config

async def _update_config_from_db(
self,
prisma_client: PrismaClient,
Expand Down Expand Up @@ -2311,10 +2338,11 @@ async def _update_config_from_db(
if param_name is not None and param_value is not None:
# check if param_name is already in the config
if param_name in config:
if isinstance(config[param_name], dict):
config[param_name].update(param_value)
else:
config[param_name] = param_value
config = self._update_config_fields(
current_config=config,
param_name=param_name,
db_param_value=param_value,
)
else:
# if it's not in the config - then add it
config[param_name] = param_value
Expand Down
14 changes: 14 additions & 0 deletions tests/llm_translation/test_optional_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -502,6 +502,20 @@ def test_dynamic_drop_additional_params(drop_params):
pass


def test_dynamic_drop_additional_params_stream_options():
    """
    Make a call to vertex ai, dropping 'stream_options' specifically
    """
    call_kwargs = dict(
        model="mistral-large-2411@001",
        custom_llm_provider="vertex_ai",
        stream_options={"include_usage": True},
        additional_drop_params=["stream_options"],
    )
    optional_params = litellm.utils.get_optional_params(**call_kwargs)

    assert "stream_options" not in optional_params


def test_dynamic_drop_additional_params_e2e():
with patch(
"litellm.llms.custom_httpx.http_handler.HTTPHandler.post", new=MagicMock()
Expand Down
16 changes: 0 additions & 16 deletions tests/local_testing/test_router.py
Original file line number Diff line number Diff line change
Expand Up @@ -2220,22 +2220,6 @@ def test_router_cooldown_api_connection_error():
except litellm.APIConnectionError:
pass

try:
router.completion(
model="gemini-1.5-pro",
messages=[{"role": "admin", "content": "Fail on this!"}],
)
except litellm.APIConnectionError:
pass

try:
router.completion(
model="gemini-1.5-pro",
messages=[{"role": "admin", "content": "Fail on this!"}],
)
except litellm.APIConnectionError:
pass


def test_router_correctly_reraise_error():
"""
Expand Down
42 changes: 42 additions & 0 deletions tests/proxy_unit_tests/test_proxy_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -934,3 +934,45 @@ def test_get_team_models():
model_access_groups=model_access_groups,
)
assert result == ["gpt-4o", "gpt-3.5-turbo", "gpt-4o-mini"]


def test_update_config_fields():
    """DB-merged litellm_settings must not clobber file-based team settings."""
    from litellm.proxy.proxy_server import ProxyConfig

    proxy_config = ProxyConfig()

    team_id = "c91e32bb-0f2a-4aa1-86c4-307ca2e03ea3"
    file_config = {
        "litellm_settings": {
            "default_team_settings": [
                {
                    "team_id": team_id,
                    "success_callback": ["langfuse"],
                    "failure_callback": ["langfuse"],
                    "langfuse_public_key": "my-fake-key",
                    "langfuse_secret": "my-fake-secret",
                }
            ]
        },
    }
    db_settings = {
        "telemetry": False,
        "drop_params": True,
        "num_retries": 5,
        "request_timeout": 600,
        "success_callback": ["langfuse"],
        "default_team_settings": [],
        "context_window_fallbacks": [{"gpt-3.5-turbo": ["gpt-3.5-turbo-large"]}],
    }

    updated_config = proxy_config._update_config_fields(
        current_config=file_config,
        param_name="litellm_settings",
        db_param_value=db_settings,
    )

    all_team_config = updated_config["litellm_settings"]["default_team_settings"]

    # check if team id config returned
    team_config = proxy_config._get_team_config(
        team_id=team_id, all_teams_config=all_team_config
    )
    assert team_config["langfuse_public_key"] == "my-fake-key"
    assert team_config["langfuse_secret"] == "my-fake-secret"

0 comments on commit 4c7a393

Please sign in to comment.