diff --git a/cookbook/logfire/README.md b/cookbook/logfire/README.md
new file mode 100644
index 000000000..7034937a6
--- /dev/null
+++ b/cookbook/logfire/README.md
@@ -0,0 +1,41 @@
+## using logfire with marvin
+
+[logfire](https://github.com/pydantic/logfire?tab=readme-ov-file#pydantic-logfire--uncomplicated-observability) is a brand-new (Apr 2024) observability tool for python applications - [otel](https://opentelemetry.io/docs/what-is-opentelemetry/)-based tracing, metrics, and logging. it's pretty [awesome](https://docs.pydantic.dev/logfire/#pydantic-logfire-the-observability-platform-you-deserve).
+
+they also happen to wrap OpenAI pretty well out of the box! see `hello.py` for a simple example.
+
+### setup
+```console
+pip install marvin
+```
+> [!NOTE]
+> optionally, if you want to try out the fastapi integration
+> ```console
+> pip install 'logfire[fastapi]' uvicorn
+> ```
+
+log in to logfire
+```console
+logfire auth
+```
+
+### usage
+using marvin should be no different from using any other library. check out [logfire's documentation](https://docs.pydantic.dev/logfire/#pydantic-logfire-the-observability-platform-you-deserve) for more information.
+
+
+### examples
+```console
+gh repo clone prefecthq/marvin && cd marvin
+uvicorn cookbook.logfire.demo_app:app
+```
+
+in another terminal
+```console
+python cookbook/logfire/send_demo_request.py
+```
+
+check out the api docs at http://localhost:8000/docs or your logfire dashboard to see traces and logs like:
+
+<p align="center">
+  <img src="/docs/assets/images/docs/examples/logfire-span.jpeg" alt="logfire span">
+</p>
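before diving into the file diffs, here's the core pattern this cookbook builds on: instrument one OpenAI client and hand it to marvin. a minimal sketch (the `summarize` function and its prompt are illustrative, not part of the diff):

```python
import logfire
import openai
from marvin import fn
from marvin.client import AsyncMarvinClient

# instrument a single OpenAI client; every marvin call routed through it is traced
client = openai.AsyncClient()
logfire.configure()
logfire.instrument_openai(client)


@fn(client=AsyncMarvinClient(client=client))
def summarize(text: str) -> str:
    """summarize the text in one sentence."""
    # marvin fills in the body at call time via the LLM


# each call shows up in logfire as a span wrapping the underlying OpenAI request
print(summarize("logfire wraps OpenAI calls with otel spans out of the box."))
```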
diff --git a/cookbook/logfire/auto_trace.py b/cookbook/logfire/auto_trace.py
new file mode 100644
index 000000000..873a477ee
--- /dev/null
+++ b/cookbook/logfire/auto_trace.py
@@ -0,0 +1,8 @@
+import logfire
+
+# auto-tracing must be configured before the traced module is imported
+logfire.install_auto_tracing(modules=["hello"])
+
+from hello import main  # noqa
+
+main()
diff --git a/cookbook/logfire/demo_app.py b/cookbook/logfire/demo_app.py
new file mode 100644
index 000000000..42caabbe6
--- /dev/null
+++ b/cookbook/logfire/demo_app.py
@@ -0,0 +1,79 @@
+from enum import Enum
+
+import logfire
+import openai
+from fastapi import Body, FastAPI
+from marvin import fn
+from marvin.client import AsyncMarvinClient
+from pydantic import BaseModel
+
+app = FastAPI()
+client = openai.AsyncClient()
+
+logfire.configure(pydantic_plugin=logfire.PydanticPlugin(record="all"))
+logfire.instrument_openai(client)
+logfire.instrument_fastapi(app)
+
+
+class Seniority(Enum):
+    """ranked seniority levels for candidates"""
+
+    JUNIOR = 1
+    MID = 2
+    SENIOR = 3
+    STAFF = 4
+
+
+class Candidate(BaseModel):
+    name: str
+    self_identified_seniority: Seniority
+    bio: str
+
+
+class Role(BaseModel):
+    title: str
+    desired_seniority: Seniority
+    description: str
+
+
+@fn(client=AsyncMarvinClient(client=client))
+def choose_among_candidates(cohort: list[Candidate], role: Role) -> Candidate:
+    return (
+        f"We need a {role.desired_seniority.name} (at least) {role.title} that can "
+        f"most likely fulfill a job of this description:\n{role.description}\n"
+    )
+
+
+@logfire.instrument("Dystopian Interview Process", extract_args=True)
+def dystopian_interview_process(candidates: list[Candidate], role: Role) -> Candidate:
+    senior_enough_candidates = [
+        candidate
+        for candidate in candidates
+        if candidate.self_identified_seniority.value >= role.desired_seniority.value
+    ]
+    logfire.info(
+        "Candidates at or above {seniority} level: {cohort}",
+        cohort=[c.name for c in senior_enough_candidates],
+        seniority=role.desired_seniority,
+    )
+    if len(senior_enough_candidates) == 1:
+        return senior_enough_candidates[0]
+
+    with logfire.span("Choosing among candidates"):
+        return choose_among_candidates(senior_enough_candidates, role)
+
+
+@app.post("/interview")
+async def interview(
+    candidates: list[Candidate] = Body(..., description="List of candidates"),
+    role: Role = Body(..., description="Role to fill"),
+) -> Candidate:
+    best_candidate = dystopian_interview_process(candidates, role)
+    logfire.info("Best candidate: {best_candidate}", best_candidate=best_candidate)
+    return best_candidate
+
+
+if __name__ == "__main__":
+    import uvicorn
+
+    uvicorn.run(app, host="localhost", port=8000)
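`demo_app.py` layers three logfire primitives: `@logfire.instrument` to open a span per call, `logfire.span` for a nested block, and `logfire.info` for structured logs inside it. a standalone sketch of that nesting, with illustrative names:

```python
import logfire

logfire.configure()


@logfire.instrument("outer step", extract_args=True)  # one span per call, args recorded
def outer(n: int) -> int:
    logfire.info("processing {n} items", n=n)  # structured log inside the span
    with logfire.span("inner step"):  # nested span for the interesting part
        return n * 2


outer(3)
```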
people") + assert isinstance(recipe, Recipe) + + +if __name__ == "__main__": + main() diff --git a/cookbook/logfire/send_demo_request.py b/cookbook/logfire/send_demo_request.py new file mode 100644 index 000000000..40a805536 --- /dev/null +++ b/cookbook/logfire/send_demo_request.py @@ -0,0 +1,47 @@ +import asyncio +import json + +import httpx + + +async def main(): + candidates = [ + { + "name": "Alice", + "self_identified_seniority": 3, + "bio": "10 years with postgres, 5 years with python, 3 years with django.", + }, + { + "name": "Bob", + "self_identified_seniority": 1, + "bio": "I just graduated from a coding bootcamp and I'm ready to take on the world!", + }, + { + "name": "Charlie", + "self_identified_seniority": 2, + "bio": "graduated 2 years ago and i can make you a react app in no time", + }, + { + "name": "David", + "self_identified_seniority": 3, + "bio": "i just been riding that SCRUM wave for 10 years fam", + }, + ] + + role = { + "title": "Senior Software Engineer", + "desired_seniority": 3, + "description": "Build and maintain a large-scale web application with a team of 10+ engineers.", + } + + async with httpx.AsyncClient() as client: + response = await client.post( + "http://localhost:8000/interview", + json={"candidates": candidates, "role": role}, + ) + result = response.json() + print(json.dumps(result, indent=2)) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/docs/assets/images/docs/examples/logfire-span.jpeg b/docs/assets/images/docs/examples/logfire-span.jpeg new file mode 100644 index 000000000..7327c8369 Binary files /dev/null and b/docs/assets/images/docs/examples/logfire-span.jpeg differ diff --git a/pyproject.toml b/pyproject.toml index 303cbc2f0..6c66b0696 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,6 +66,7 @@ audio = [ "simpleaudio>=1.0", ] video = ["opencv-python >= 4.5"] + slackbot = ["marvin[prefect]", "numpy", "raggy", "turbopuffer"] [project.urls] diff --git a/src/marvin/beta/assistants/assistants.py b/src/marvin/beta/assistants/assistants.py index 832849694..22f88c439 100644 --- a/src/marvin/beta/assistants/assistants.py +++ b/src/marvin/beta/assistants/assistants.py @@ -223,7 +223,11 @@ async def chat_async( ): """Async method to start a chat session with the assistant.""" history = Path(assistant_dir) / "chat_history.txt" if assistant_dir else None - session = PromptSession(history=FileHistory(str(history)) if history else None) + if not history.exists(): + history.parent.mkdir(parents=True, exist_ok=True) + session = PromptSession( + history=FileHistory(str(history.absolute().resolve())) if history else None + ) # send an initial message, if provided if initial_message is not None: await self.say_async(initial_message, **kwargs) diff --git a/src/marvin/settings.py b/src/marvin/settings.py index c65057f28..b5c86edc1 100644 --- a/src/marvin/settings.py +++ b/src/marvin/settings.py @@ -36,9 +36,7 @@ class ChatCompletionSettings(MarvinSettings): model_config = SettingsConfigDict( env_prefix="marvin_chat_completions_", extra="ignore" ) - model: str = Field( - description="The default chat model to use.", default="gpt-4-1106-preview" - ) + model: str = Field(description="The default chat model to use.", default="gpt-4") temperature: float = Field(description="The default temperature to use.", default=1) @@ -292,6 +290,10 @@ class Settings(MarvinSettings): "Whether to log verbose messages, such as full API requests and responses." 
diff --git a/src/marvin/settings.py b/src/marvin/settings.py
index c65057f28..b5c86edc1 100644
--- a/src/marvin/settings.py
+++ b/src/marvin/settings.py
@@ -36,9 +36,7 @@ class ChatCompletionSettings(MarvinSettings):
     model_config = SettingsConfigDict(
         env_prefix="marvin_chat_completions_", extra="ignore"
     )
-    model: str = Field(
-        description="The default chat model to use.", default="gpt-4-1106-preview"
-    )
+    model: str = Field(description="The default chat model to use.", default="gpt-4")
 
     temperature: float = Field(description="The default temperature to use.", default=1)
 
@@ -292,6 +290,10 @@ class Settings(MarvinSettings):
             "Whether to log verbose messages, such as full API requests and responses."
         ),
     )
+    max_tool_output_length: int = Field(
+        150,
+        description="The maximum length of output from a tool before it is truncated.",
+    )
 
     @field_validator("log_level", mode="after")
     @classmethod
diff --git a/src/marvin/utilities/tools.py b/src/marvin/utilities/tools.py
index 56884f522..2cc366c57 100644
--- a/src/marvin/utilities/tools.py
+++ b/src/marvin/utilities/tools.py
@@ -17,6 +17,7 @@
 from pydantic.fields import FieldInfo
 from pydantic.json_schema import GenerateJsonSchema, JsonSchemaMode
 
+import marvin
 from marvin.types import Function, FunctionTool
 from marvin.utilities.asyncio import run_sync
 from marvin.utilities.logging import get_logger
@@ -168,14 +169,16 @@ def call_function_tool(
 
     arguments = json.loads(function_arguments_json)
     logger.debug_kv(
-        f"{tool.function.name}", f"called with arguments: {arguments}", "green"
+        f"{tool.function.name}",
+        f"called with arguments: {json.dumps(arguments, indent=2)}",
+        "green",
     )
     output = tool.function._python_fn(**arguments)
     if inspect.isawaitable(output):
         output = run_sync(output)
     if isinstance(output, BaseModel):
         output = output.model_dump(mode="json")
-    truncated_output = str(output)[:100]
+    truncated_output = str(output)[: marvin.settings.max_tool_output_length]
     if len(truncated_output) < len(str(output)):
         truncated_output += "..."
     logger.debug_kv(f"{tool.function.name}", f"returned: {truncated_output}", "green")
diff --git a/tests/ai/test_classifier.py b/tests/ai/test_classifier.py
index ce0f65628..eba8707e2 100644
--- a/tests/ai/test_classifier.py
+++ b/tests/ai/test_classifier.py
@@ -27,7 +27,7 @@ def test_is_enum():
 class TestClassifier:
     class TestSimple:
         def test_color_red(self):
-            result = Color("rose")
+            result = Color("burgundy")
             assert result == Color.RED
 
         def test_color_green(self):
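the new `max_tool_output_length` setting caps how much tool output lands in debug logs. a quick sketch of the truncation rule it controls; the env-var spelling is an assumption based on pydantic-settings' usual `MARVIN_` prefix:

```python
# assumed env-var spelling: MARVIN_MAX_TOOL_OUTPUT_LENGTH=500 python app.py

import marvin

limit = marvin.settings.max_tool_output_length  # 150 by default
output = "x" * 1000

# same rule as call_function_tool above: cut at the limit, mark the cut
truncated = output[:limit]
if len(truncated) < len(output):
    truncated += "..."

assert truncated == "x" * limit + "..."
```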