Feature/basic proxy (Significant-Gravitas#4164)
* basic proxy (Significant-Gravitas#54)

* basic proxy (Significant-Gravitas#55)

* basic proxy

* basic proxy

* basic proxy

* basic proxy

* add back double quotes

* add more specific files

* write file

* basic proxy

* Put back double quotes
waynehamadi authored May 13, 2023
1 parent e6f8e51 commit 4143d21
Showing 22 changed files with 300 additions and 36 deletions.
49 changes: 49 additions & 0 deletions .github/workflows/add-cassettes.yml
@@ -0,0 +1,49 @@
name: Merge and Commit Cassettes

on:
pull_request_target:
types:
- closed

jobs:
update-cassettes:
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0 # This is necessary to fetch all branches and tags

- name: Fetch all branches
run: git fetch --all

- name: Reset branch
run: |
git checkout ${{ github.event.pull_request.base.ref }}
git reset --hard origin/cassette-diff-${{ github.event.pull_request.number }}
- name: Create PR
id: create_pr
uses: peter-evans/create-pull-request@v5
with:
commit-message: Update cassettes
signoff: false
branch: cassette-diff-${{ github.event.pull_request.number }}
delete-branch: false
title: "Update cassettes"
body: "This PR updates the cassettes."
draft: false

- name: Check PR
run: |
echo "Pull Request Number - ${{ steps.create_pr.outputs.pull-request-number }}"
echo "Pull Request URL - ${{ steps.create_pr.outputs.pull-request-url }}"
- name: Comment PR URL in the current PR
uses: thollander/actions-comment-pull-request@v2
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
message: |
New pull request created for cassettes: [HERE](${{ steps.create_pr.outputs.pull-request-url }}). Please merge it asap.
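Taken together with the ci.yml change below, the flow is: CI pushes freshly recorded cassettes to a cassette-diff-<PR number> branch, and once the PR merges this workflow turns that branch into a follow-up "Update cassettes" PR and links it in a comment. A minimal sketch for locating that follow-up PR from a script, assuming PyGithub, a token in GITHUB_TOKEN, and this repository slug:

    import os

    from github import Github  # PyGithub; assumed installed


    def find_cassette_pr(pr_number: int):
        """Return the URL of the open 'Update cassettes' PR for a merged PR, if any."""
        gh = Github(os.environ["GITHUB_TOKEN"])
        repo = gh.get_repo("Significant-Gravitas/Auto-GPT")  # repo slug assumed
        head = f"cassette-diff-{pr_number}"  # branch name used by both workflows
        for pr in repo.get_pulls(state="open"):
            if pr.head.ref == head:
                return pr.html_url
        return None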
24 changes: 22 additions & 2 deletions .github/workflows/ci.yml
@@ -3,12 +3,12 @@ name: Python CI
on:
push:
branches: [ master ]
pull_request:
pull_request_target:
branches: [ master, stable ]

concurrency:
group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
cancel-in-progress: ${{ github.event_name == 'pull_request_target' }}

jobs:
lint:
@@ -19,6 +19,9 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref }}

- name: Set up Python ${{ env.min-python-version }}
uses: actions/setup-python@v2
@@ -58,6 +61,9 @@ jobs:
steps:
- name: Check out repository
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref }}

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
@@ -74,6 +80,20 @@
pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term
env:
CI: true
PROXY: ${{ vars.PROXY }}
AGENT_MODE: ${{ vars.AGENT_MODE }}
AGENT_TYPE: ${{ vars.AGENT_TYPE }}

- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3

- name: Stage new files and commit
run: |
git add tests
git diff --cached --quiet && echo "No changes to commit" && exit 0
git config user.email "[email protected]"
git config user.name "GitHub Actions"
git commit -m "Add new cassettes"
git checkout -b cassette-diff-${{ github.event.pull_request.number }}
git push -f origin cassette-diff-${{ github.event.pull_request.number }}
echo "COMMIT_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV
9 changes: 5 additions & 4 deletions autogpt/llm/api_manager.py
@@ -59,10 +59,11 @@ def create_chat_completion(
max_tokens=max_tokens,
api_key=cfg.openai_api_key,
)
logger.debug(f"Response: {response}")
prompt_tokens = response.usage.prompt_tokens
completion_tokens = response.usage.completion_tokens
self.update_cost(prompt_tokens, completion_tokens, model)
if not hasattr(response, "error"):
logger.debug(f"Response: {response}")
prompt_tokens = response.usage.prompt_tokens
completion_tokens = response.usage.completion_tokens
self.update_cost(prompt_tokens, completion_tokens, model)
return response

def update_cost(self, prompt_tokens, completion_tokens, model):
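The new hasattr guard exists because a proxied call can come back as an error payload with no usage block, and the old code would then crash reading response.usage. A simplified sketch of the pattern, with the response shape assumed from this diff:

    def track_cost_if_successful(api_manager, response, model) -> None:
        # Proxied failures are objects carrying an `error` attribute and no
        # `usage` block, so only successful completions are billed.
        if hasattr(response, "error"):
            return
        api_manager.update_cost(
            response.usage.prompt_tokens,
            response.usage.completion_tokens,
            model,
        )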
2 changes: 1 addition & 1 deletion autogpt/llm/llm_utils.py
@@ -181,7 +181,7 @@ def create_chat_completion(
)
warned_user = True
except (APIError, Timeout) as e:
if e.http_status != 502:
if e.http_status != 502 :
raise
if attempt == num_retries - 1:
raise
3 changes: 3 additions & 0 deletions tests/conftest.py
@@ -1,3 +1,4 @@
import os
from pathlib import Path

import pytest
@@ -9,6 +10,8 @@

pytest_plugins = ["tests.integration.agent_factory"]

PROXY = os.environ.get("PROXY")


@pytest.fixture()
def workspace_root(tmp_path: Path) -> Path:
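PROXY is read once at import time, so downstream test code can treat it as a module-level constant. A small sketch of the convention the integration fixtures build on (the helper name and fallback URL are illustrative, not part of this change):

    import os

    PROXY = os.environ.get("PROXY")  # None when no proxy is configured


    def effective_api_base() -> str:
        # Mirrors patch_api_base() in tests/integration/conftest.py.
        return f"{PROXY}/v1" if PROXY else "https://api.openai.com/v1"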
@@ -26,7 +26,7 @@ def input_generator(input_sequence: list) -> Generator[str, None, None]:
@requires_api_key("OPENAI_API_KEY")
@run_multiple_times(3)
def test_information_retrieval_challenge_a(
get_company_revenue_agent, monkeypatch
get_company_revenue_agent, monkeypatch, patched_api_requestor
) -> None:
"""
Test the challenge_a function in a given agent by mocking user inputs and checking the output file content.
@@ -13,7 +13,7 @@
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_memory_challenge_a(
memory_management_agent: Agent, user_selected_level: int
memory_management_agent: Agent, user_selected_level: int, patched_api_requestor
) -> None:
"""
The agent reads a file containing a task_id. Then, it reads a series of other files.
@@ -30,7 +30,7 @@ def test_memory_challenge_a(
create_instructions_files(memory_management_agent, num_files, task_id)

try:
run_interaction_loop(memory_management_agent, 180)
run_interaction_loop(memory_management_agent, 400)
# catch system exit exceptions
except SystemExit:
file_path = str(memory_management_agent.workspace.get_path("output.txt"))
@@ -14,7 +14,7 @@
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_memory_challenge_b(
memory_management_agent: Agent, user_selected_level: int
memory_management_agent: Agent, user_selected_level: int, patched_api_requestor
) -> None:
"""
The agent reads a series of files, each containing a task_id and noise. After reading 'n' files,
@@ -14,7 +14,7 @@
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_memory_challenge_c(
memory_management_agent: Agent, user_selected_level: int
memory_management_agent: Agent, user_selected_level: int, patched_api_requestor
) -> None:
"""
Instead of reading task Ids from files as with the previous challenges, the agent now must remember
36 changes: 35 additions & 1 deletion tests/integration/conftest.py
@@ -1,7 +1,9 @@
import os

import openai
import pytest

from tests.conftest import PROXY
from tests.vcr.vcr_filter import before_record_request, before_record_response


@@ -17,5 +19,37 @@ def vcr_config():
"X-OpenAI-Client-User-Agent",
"User-Agent",
],
"match_on": ["method", "uri", "body"],
"match_on": ["method", "body"],
}


def patch_api_base(requestor):
new_api_base = f"{PROXY}/v1"
requestor.api_base = new_api_base
return requestor


@pytest.fixture
def patched_api_requestor(mocker):
original_init = openai.api_requestor.APIRequestor.__init__
original_validate_headers = openai.api_requestor.APIRequestor._validate_headers

def patched_init(requestor, *args, **kwargs):
original_init(requestor, *args, **kwargs)
patch_api_base(requestor)

def patched_validate_headers(self, supplied_headers):
headers = original_validate_headers(self, supplied_headers)
headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")
return headers

if PROXY:
mocker.patch("openai.api_requestor.APIRequestor.__init__", new=patched_init)
mocker.patch.object(
openai.api_requestor.APIRequestor,
"_validate_headers",
new=patched_validate_headers,
)

return mocker
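Usage is opt-in per test: simply requesting patched_api_requestor is enough. With PROXY unset the fixture patches nothing and tests hit api.openai.com as before; with it set, every APIRequestor built during the test targets {PROXY}/v1 and sends the AGENT-MODE/AGENT-TYPE headers. A hypothetical test illustrating the effect (the key= argument name is assumed from openai-python 0.x):

    import openai

    from tests.conftest import PROXY


    def test_requestor_points_at_proxy(patched_api_requestor) -> None:
        requestor = openai.api_requestor.APIRequestor(key="sk-dummy")  # no request sent
        if PROXY:
            assert requestor.api_base == f"{PROXY}/v1"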
@@ -573,4 +573,120 @@ interactions:
status:
code: 200
message: OK
- request:
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
are write_to_file-GPT, an AI designed to use the write_to_file command to write
''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete
command to complete the task.\nYour decisions must always be made independently
without seeking user assistance. Play to your strengths as an LLM and pursue
simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file
command to write ''Hello World'' into a file named \"hello_world.txt\".\n2.
Use the task_complete command to complete the task.\n3. Do not use any other
commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your
short term memory is short, so immediately save important information to files.\n2.
If you are unsure how you previously did something or want to recall past events,
thinking about similar events will help you remember.\n3. No user assistance\n4.
Exclusively use the commands listed in double quote e.g. \"command name\"\n\nCommands:\n1.
append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
\"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n4.
read_file: Read file, args: \"filename\": \"<filename>\"\n5. write_to_file:
Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n6.
delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7. get_hyperlinks:
Get text summary, args: \"url\": \"<url>\"\n8. get_text_summary: Get text summary,
args: \"url\": \"<url>\", \"question\": \"<question>\"\n9. list_agents: List
GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\":
\"<key>\", \"message\": \"<message>\"\n11. start_agent: Start GPT Agent, args:
\"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n12.
task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
Internet access for searches and information gathering.\n2. Long Term memory
management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
actions to ensure you are performing to the best of your abilities.\n2. Constructively
self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
and strategies to refine your approach.\n4. Every command has a cost, so be
smart and efficient. Aim to complete tasks in the least number of steps.\n5.
Write all code to a file.\n\nYou should only respond in JSON format as described
below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\":
\"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n-
long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\":
\"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\":
\"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n}
\nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
"content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
"user", "content": "Determine which next command to use, and respond using the
format specified above:"}], "temperature": 0, "max_tokens": 0}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '3405'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA7yTT4/TMBDF73yK0Vx6cauUqt1trmhhK8QBBEKIoMrrTBvT2BPsCVu2yndfJemf
3SBOCK7zxu/9xh4f0OaYoim0GFeV46vX7u3i183N/Geup/7Lq9vVh/fXEt49PLwpNCrku+9k5Hhi
YthVJYlljwpNIC2UYzpdXM+Wy3kySxQ6zqnEFLeVjGeT+VjqcMfjZJZMUWEd9ZYwPWAV2FWyFt6R
j5heLRKFF+9z/WUyVSgsujyXlotpo9AUbA1FTL8e0FE82QYuCVPUMdoo2ksLyV7ItwMcMg8AkKEU
XG8LiRmmcCweBdpLW8xwBZ4oB2GoI4EUBPfBCq2F1xtbEhh2TvuuoRNgdEtlyfCZQ5mPwHph0NC1
eu0oh1HR6uv7Vp/IXkaTDNXT7EA6srd+2wN8LAhExx0E+lHbQBEc/UWago72j3PY2ImOo4CuqsBV
sFoINhxAilbVcTdErkrte9oxfPpP12SCFWtsdMN3Ih/r0DJogdX51QyHQEYuEf090F4uTMJda9sy
TIsV6d0p6d6W5b9chz64Uac1PZr+tqWtQ8/0DGKArsN2uOC90PZeLAYcz0yGn+LJTCfajvgInvkG
G4Ub620s1v0+Y4pRuEKF1ue0xzRpvjUvHgEAAP//AwDSj7qBhAQAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7c6c3f8bcdd1cf87-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 13 May 2023 16:24:06 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-model:
- gpt-3.5-turbo-0301
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '16269'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3500'
x-ratelimit-limit-tokens:
- '90000'
x-ratelimit-remaining-requests:
- '3499'
x-ratelimit-remaining-tokens:
- '86496'
x-ratelimit-reset-requests:
- 17ms
x-ratelimit-reset-tokens:
- 2.336s
x-request-id:
- 8d3e6826e88e77fb2cbce01166ddc550
status:
code: 200
message: OK
version: 1
2 changes: 1 addition & 1 deletion tests/integration/goal_oriented/test_browse_website.py
@@ -8,7 +8,7 @@

@requires_api_key("OPENAI_API_KEY")
@pytest.mark.vcr
def test_browse_website(browser_agent: Agent) -> None:
def test_browse_website(browser_agent: Agent, patched_api_requestor) -> None:
file_path = browser_agent.workspace.get_path("browse_website.txt")
try:
run_interaction_loop(browser_agent, 120)
7 changes: 5 additions & 2 deletions tests/integration/goal_oriented/test_write_file.py
@@ -1,3 +1,6 @@
import os

import openai
import pytest

from autogpt.agent import Agent
@@ -8,10 +11,10 @@

@requires_api_key("OPENAI_API_KEY")
@pytest.mark.vcr
def test_write_file(writer_agent: Agent) -> None:
def test_write_file(writer_agent: Agent, patched_api_requestor) -> None:
file_path = str(writer_agent.workspace.get_path("hello_world.txt"))
try:
run_interaction_loop(writer_agent, 40)
run_interaction_loop(writer_agent, 200)
# catch system exit exceptions
except SystemExit: # the agent returns an exception when it shuts down
content = read_file(file_path)
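All of these goal-oriented tests share one shape: run the agent under a budget, catch the SystemExit raised when it invokes task_complete, then inspect the workspace output. A condensed sketch of that pattern — the import paths are assumed from the surrounding test modules, and the second run_interaction_loop argument is treated as the budget this commit raises (40 → 200 here, 180 → 400 in the memory challenge):

    from autogpt.agent import Agent
    from autogpt.commands.file_operations import read_file  # import path assumed
    from tests.integration.agent_utils import run_interaction_loop  # import path assumed


    def run_agent_and_read(agent: Agent, budget: int, filename: str) -> str:
        try:
            run_interaction_loop(agent, budget)
        except SystemExit:  # the agent shuts down via the task_complete command
            return read_file(str(agent.workspace.get_path(filename)))
        raise AssertionError("agent did not shut down within its budget")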