Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

adding praisonaiagents tools to praisonai #222

Merged
Merged 1 commit on Dec 27, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
FROM python:3.11-slim
WORKDIR /app
COPY . .
RUN pip install flask praisonai==2.0.15 gunicorn markdown
RUN pip install flask praisonai==2.0.16 gunicorn markdown
EXPOSE 8080
CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]
28 changes: 1 addition & 27 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -464,33 +464,7 @@ if __name__ == "__main__":

## Commands to Install Dependencies:

1. **Install all dependencies, including dev dependencies:**

```sh
poetry install
```

2. **Install only documentation dependencies:**

```sh
poetry install --with docs
```

3. **Install only test dependencies:**

```sh
poetry install --with test
```

4. **Install only dev dependencies:**

```sh
poetry install --with dev
```

This configuration ensures that your development dependencies are correctly categorized and installed as needed.

### Using uv (Fast Python Package Installer)
### Using uv
```bash
# Install uv if you haven't already
pip install uv
Expand Down
6 changes: 5 additions & 1 deletion agents.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,17 @@ roles:
role: Researcher
llm:
model: "gpt-4o"
reflect_llm:
model: "gpt-4o"
min_reflect: 2
max_reflect: 4
tasks:
research_task:
description: Research about Mars, its environment, and the feasibility of
a cat being on Mars. Also, research about cat behavior and characteristics.
expected_output: Document with research findings on Mars and cats.
tools:
- 'search_tool'
- search_tool
narrative_designer:
backstory: Skilled in narrative development, with a focus on creating engaging
stories.
Expand Down
5 changes: 4 additions & 1 deletion agents/example.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,10 @@ def internet_search_tool(query) -> list:
allow_delegation=False,
tools=[internet_search_tool],
llm="gpt-4o",
markdown=True
markdown=True,
reflect_llm="gpt-4o",
min_reflect=2,
max_reflect=4
)
writer = Agent(
name="Writer",
Expand Down
131 changes: 94 additions & 37 deletions agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,27 +21,49 @@ def _generate_tool_definition(self, function_name):
"""
Generate a tool definition from a function name by inspecting the function.
"""
logging.debug(f"Attempting to generate tool definition for: {function_name}")

# First try to get the tool definition if it exists
tool_def_name = f"{function_name}_definition"
tool_def = globals().get(tool_def_name)
logging.debug(f"Looking for {tool_def_name} in globals: {tool_def is not None}")

if not tool_def:
import __main__
tool_def = getattr(__main__, tool_def_name, None)
logging.debug(f"Looking for {tool_def_name} in __main__: {tool_def is not None}")

if tool_def:
logging.debug(f"Found tool definition: {tool_def}")
return tool_def

# If no definition exists, try to generate one from the function
func = globals().get(function_name)
# Try to find the function in the agent's tools list first
func = None
for tool in self.tools:
if callable(tool) and getattr(tool, '__name__', '') == function_name:
func = tool
break

logging.debug(f"Looking for {function_name} in agent tools: {func is not None}")

# If not found in tools, try globals and main
if not func:
import __main__
func = getattr(__main__, function_name, None)
func = globals().get(function_name)
logging.debug(f"Looking for {function_name} in globals: {func is not None}")

if not func:
import __main__
func = getattr(__main__, function_name, None)
logging.debug(f"Looking for {function_name} in __main__: {func is not None}")

if not func or not callable(func):
logging.debug(f"Function {function_name} not found or not callable")
return None

import inspect
sig = inspect.signature(func)
logging.debug(f"Function signature: {sig}")

parameters = {
"type": "object",
"properties": {},
Expand All @@ -50,17 +72,22 @@ def _generate_tool_definition(self, function_name):

# Parse docstring for parameter descriptions
docstring = inspect.getdoc(func)
logging.debug(f"Function docstring: {docstring}")

param_descriptions = {}
if docstring:
import re
param_section = re.split(r'\s*Args:\s*', docstring)
logging.debug(f"Param section split: {param_section}")
if len(param_section) > 1:
param_lines = param_section[1].split('\n')
for line in param_lines:
line = line.strip()
if line and ':' in line:
param_name, param_desc = line.split(':', 1)
param_descriptions[param_name.strip()] = param_desc.strip()

logging.debug(f"Parameter descriptions: {param_descriptions}")

for name, param in sig.parameters.items():
param_type = "string" # Default type
Expand All @@ -83,26 +110,30 @@ def _generate_tool_definition(self, function_name):
parameters["properties"][name] = param_info
if param.default == inspect.Parameter.empty:
parameters["required"].append(name)

logging.debug(f"Generated parameters: {parameters}")

# Extract description from docstring
description = docstring.split('\n')[0] if docstring else f"Function {function_name}"

return {
tool_def = {
"type": "function",
"function": {
"name": function_name,
"description": description,
"parameters": parameters
}
}
logging.debug(f"Generated tool definition: {tool_def}")
return tool_def

def __init__(
self,
name: str,
role: str,
goal: str,
backstory: str,
llm: Optional[Union[str, Any]] = "gpt-4o-mini",
llm: Optional[Union[str, Any]] = "gpt-4o",
tools: Optional[List[Any]] = None,
function_calling_llm: Optional[Any] = None,
max_iter: int = 20,
Expand All @@ -125,7 +156,9 @@ def __init__(
use_system_prompt: Optional[bool] = True,
markdown: bool = True,
self_reflect: bool = True,
max_reflection_iter: int = 3
max_reflect: int = 3,
min_reflect: int = 1,
reflect_llm: Optional[str] = None
):
self.name = name
self.role = role
Expand Down Expand Up @@ -155,28 +188,45 @@ def __init__(
self.chat_history = []
self.markdown = markdown
self.self_reflect = self_reflect
self.max_reflection_iter = max_reflection_iter

self.max_reflect = max_reflect
self.min_reflect = min_reflect
self.reflect_llm = reflect_llm
def execute_tool(self, function_name, arguments):
"""
Execute a tool dynamically based on the function name and arguments.
"""
logging.debug(f"{self.name} executing tool {function_name} with arguments: {arguments}")

# Try to get the function from globals first
func = globals().get(function_name)
# Try to find the function in the agent's tools list first
func = None
for tool in self.tools:
if callable(tool) and getattr(tool, '__name__', '') == function_name:
func = tool
break

logging.debug(f"Looking for {function_name} in agent tools: {func is not None}")

# If not found in tools, try globals and main
if not func:
# Then try to get from the main module
import __main__
func = getattr(__main__, function_name, None)
func = globals().get(function_name)
logging.debug(f"Looking for {function_name} in globals: {func is not None}")

if not func:
import __main__
func = getattr(__main__, function_name, None)
logging.debug(f"Looking for {function_name} in __main__: {func is not None}")

if func and callable(func):
try:
return func(**arguments)
except Exception as e:
return {"error": str(e)}
error_msg = str(e)
logging.error(f"Error executing tool {function_name}: {error_msg}")
return {"error": error_msg}

return {"error": f"Tool '{function_name}' is not callable"}
error_msg = f"Tool '{function_name}' is not callable"
logging.error(error_msg)
return {"error": error_msg}

def clear_history(self):
self.chat_history = []
Expand Down Expand Up @@ -287,8 +337,8 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True):
def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
if self.use_system_prompt:
system_prompt = f"""{self.backstory}\n
Your Role: {self.role}\n
Your Goal: {self.goal}
Your Role: {self.role}\n
Your Goal: {self.goal}
"""
else:
system_prompt = None
Expand Down Expand Up @@ -361,17 +411,17 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
return response_text

reflection_prompt = f"""
Reflect on your previous response: '{response_text}'.
Identify any flaws, improvements, or actions.
Provide a "satisfactory" status ('yes' or 'no').
Output MUST be JSON with 'reflection' and 'satisfactory'.
Reflect on your previous response: '{response_text}'.
Identify any flaws, improvements, or actions.
Provide a "satisfactory" status ('yes' or 'no').
Output MUST be JSON with 'reflection' and 'satisfactory'.
"""
logging.debug(f"{self.name} reflection attempt {reflection_count+1}, sending prompt: {reflection_prompt}")
messages.append({"role": "user", "content": reflection_prompt})

try:
reflection_response = client.beta.chat.completions.parse(
model=self.llm,
model=self.reflect_llm if self.reflect_llm else self.llm,
messages=messages,
temperature=temperature,
response_format=ReflectionOutput
Expand All @@ -380,35 +430,42 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None):
reflection_output = reflection_response.choices[0].message.parsed

if self.verbose:
display_self_reflection(f"Agent {self.name} self reflection: reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")
display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'")

messages.append({"role": "assistant", "content": f"Self Reflection: {reflection_output.reflection} Satisfactory?: {reflection_output.satisfactory}"})

if reflection_output.satisfactory == "yes":
# Only consider satisfactory after minimum reflections
if reflection_output.satisfactory == "yes" and reflection_count >= self.min_reflect - 1:
if self.verbose:
display_self_reflection("Agent marked the response as satisfactory")
display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections")
self.chat_history.append({"role": "user", "content": prompt})
self.chat_history.append({"role": "assistant", "content": response_text})
display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
return response_text

logging.debug(f"{self.name} reflection not satisfactory, requesting regeneration.")
# Check if we've hit max reflections
if reflection_count >= self.max_reflect - 1:
if self.verbose:
display_self_reflection("Maximum reflection count reached, returning current response")
self.chat_history.append({"role": "user", "content": prompt})
self.chat_history.append({"role": "assistant", "content": response_text})
display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
return response_text

logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
response_text = response.choices[0].message.content.strip()
reflection_count += 1
continue # Continue the loop for more reflections

except Exception as e:
display_error(f"Error in parsing self-reflection json {e}. Retrying")
logging.error("Reflection parsing failed.", exc_info=True)
messages.append({"role": "assistant", "content": f"Self Reflection failed."})
reflection_count += 1
continue # Continue even after error to try again

reflection_count += 1

self.chat_history.append({"role": "user", "content": prompt})
self.chat_history.append({"role": "assistant", "content": response_text})

if self.verbose:
logging.info(f"Agent {self.name} final response: {response_text}")
display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time)
return response_text
except Exception as e:
display_error(f"Error in chat: {e}")
return None
4 changes: 2 additions & 2 deletions agents/praisonaiagents/agents/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,8 +64,8 @@ def execute_task(self, task_id):
executor_agent = task.agent

task_prompt = f"""
You need to do the following task: {task.description}.
Expected Output: {task.expected_output}.
You need to do the following task: {task.description}.
Expected Output: {task.expected_output}.
"""
if task.context:
context_results = ""
Expand Down
2 changes: 1 addition & 1 deletion agents/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "praisonaiagents"
version = "0.0.7"
version = "0.0.12"
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
authors = [
{ name="Mervin Praison" }
Expand Down
2 changes: 1 addition & 1 deletion agents/uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion docs/api/praisonai/deploy.html
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ <h2 id="raises">Raises</h2>
file.write(&#34;FROM python:3.11-slim\n&#34;)
file.write(&#34;WORKDIR /app\n&#34;)
file.write(&#34;COPY . .\n&#34;)
file.write(&#34;RUN pip install flask praisonai==2.0.15 gunicorn markdown\n&#34;)
file.write(&#34;RUN pip install flask praisonai==2.0.16 gunicorn markdown\n&#34;)
file.write(&#34;EXPOSE 8080\n&#34;)
file.write(&#39;CMD [&#34;gunicorn&#34;, &#34;-b&#34;, &#34;0.0.0.0:8080&#34;, &#34;api:app&#34;]\n&#39;)

Expand Down
Loading
Loading