Merge pull request #24 from Significant-Gravitas/swiftyos/agpt-276
added formatting workflow
Swiftyos authored Feb 14, 2024
2 parents 81ecffc + 0ad899b commit 427dba6
Showing 23 changed files with 106 additions and 69 deletions.
34 changes: 34 additions & 0 deletions .github/workflows/formatting.yml
@@ -0,0 +1,34 @@
name: Python Code Quality

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  code-quality:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python 3.11
        uses: actions/setup-python@v4
        with:
          python-version: 3.11

      - name: Install Dependencies
        run: |
          python -m pip install --upgrade pip
          pip install isort ruff
      - name: Run isort
        run: isort .

      - name: Run ruff formatter
        run: ruff format .

      - uses: stefanzweifel/git-auto-commit-action@v4
        with:
          commit_message: 'style fixes by ruff'
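
For reference, the two formatting passes in this workflow can also be run locally before pushing. A minimal sketch in Python, assuming isort and ruff are already installed in the active environment (the format_tree helper and its default path are illustrative, not part of the repository):

# Local sketch of the CI job above: run isort, then the ruff formatter,
# in the same order as the workflow steps.
import subprocess
import sys


def format_tree(path: str = ".") -> int:
    """Return the first non-zero exit code from isort / ruff format, else 0."""
    for cmd in (["isort", path], ["ruff", "format", path]):
        result = subprocess.run(cmd)  # assumes both tools are on PATH
        if result.returncode != 0:
            return result.returncode
    return 0


if __name__ == "__main__":
    sys.exit(format_tree())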
4 changes: 4 additions & 0 deletions .ruff.toml
@@ -0,0 +1,4 @@

line-length = 88
target-version = "py39"
select = ["E", "W"]
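
The new .ruff.toml caps lines at 88 characters, targets Python 3.9 syntax, and selects the pycodestyle-derived E (error) and W (warning) rule groups. Note the workflow above only runs the formatter (ruff format), so the selected rules take effect when the linter (ruff check) is run separately. A small hypothetical snippet of the kind of issue those groups report (names are illustrative only):

# Hypothetical example: with select = ["E", "W"], `ruff check` reports
# pycodestyle violations such as the None comparison below.
value = None

if value == None:  # E711: comparison to None should use `value is None`
    print("value is unset")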
2 changes: 1 addition & 1 deletion codex/__main__.py
@@ -77,12 +77,12 @@ def process_app(app_name: str) -> None:
@cli.command()
def serve() -> None:
    import uvicorn

    from codex.app import app

    uvicorn.run(app, host="0.0.0.0", port=os.environ.get("PORT", 8000))



if __name__ == "__main__":
    setup_logging(local=os.environ.get("ENV", "CLOUD").lower() == "local")
    cli()
1 change: 0 additions & 1 deletion codex/prompts/claude/requirements/AskFunction.py
@@ -1,6 +1,5 @@
from typing import Callable


ASK_FUNCTION_AUTO = """
Human: You are a professional answer. Your answers are always well thought out and thorough, but say no more than they need to answer the question. When you answer the question, you provide your thoughts first, set off by <thoughts> tags.
@@ -6,9 +6,9 @@
Assistant: Okay, ask away.
-Human: You are an expert product owner.
+Human: You are an expert product owner.
-Given the basic project description and the clarification, Q&A write a detailed product description.
+Given the basic project description and the clarification, Q&A write a detailed product description.
The project description should be at least 750 words. Be clear and exhaustive such that it is easy to determine the functional and non-functional requirements of the product.
You should carefully think about it step by step and share those answers in a block set off by <think>thoughts here</think>.
2 changes: 1 addition & 1 deletion codex/prompts/claude/requirements/EndpointGeneration.py
@@ -77,7 +77,7 @@ class ResponseModel(BaseModel):
Example for `GET /user_id_by_discord_id/{id}`:
-<think>I'll need to leverage the context provided to ensure the UserDTO object makes sense. I'll also provide any models needed for my return types if they aren't based on the db model from prisma</think>
+<think>I'll need to leverage the context provided to ensure the UserDTO object makes sense. I'll also provide any models needed for my return types if they aren't based on the db model from prisma</think>
<db_models_needed>
[User]
</db_models_needed>
2 changes: 1 addition & 1 deletion codex/prompts/claude/requirements/ModuleRefinement.py
@@ -28,7 +28,7 @@
</endpoint>
... repeat for all related endpoints in a group ie /user GET PUT
</endoint_group>
-... repeat endpoints and groups until ALL requirements are satisfied
+... repeat endpoints and groups until ALL requirements are satisfied
</endpoints>
</module>
... repeat for each module
2 changes: 1 addition & 1 deletion codex/prompts/claude/requirements/NestJSDocs.py
@@ -1818,7 +1818,7 @@ class SequelizeConfigService implements SequelizeOptionsFactory {
"""

-NEST_JS_CLI_RULES = """
+NEST_JS_CLI_RULES = r"""
### CLI command reference
#### nest new
10 changes: 5 additions & 5 deletions codex/prompts/claude/requirements/ProductIntoRequirement.py
@@ -14,7 +14,7 @@
> Is monetization implemented through authorization? [Yes, No, N/A]
> Do we need Authentication? [Yes, No]
> Do we need authorization? [Yes, No, N/A]
-> What authorization roles do we need? For this question, answer an array of One Word Roles such as [UserType1, UserType2, UserType3, N/A].
+> What authorization roles do we need? For this question, answer an array of One Word Roles such as [UserType1, UserType2, UserType3, N/A].
"""

FEATURE_BASELINE_CHECKS = """
@@ -31,12 +31,12 @@
Assistant: Okay, ask away.
Human: You are an expert product manager who works to help us ensure our MVPs are reasonable.
-You should carefully think about it step by step, and share those answers in a block set off by <think>thoughts here</think>.
-Each question will be in the following form: >Question? [ValidAnswer1, ValidAnswer2, ...].
-You should answer with only one of the valid options next to each question.
+You should carefully think about it step by step, and share those answers in a block set off by <think>thoughts here</think>.
+Each question will be in the following form: >Question? [ValidAnswer1, ValidAnswer2, ...].
+You should answer with only one of the valid options next to each question.
Set the answer off by
<answer>
-<wrapper>
+<wrapper>
<question>question</question>
<think>thoughts on this question. Don't answer the question here, just put the raw thoughts</think>
<answer>valid answer</answer>
5 changes: 2 additions & 3 deletions codex/prompts/claude/requirements/RequirementIntoModule.py
@@ -1,11 +1,10 @@
from codex.prompts.claude.requirements.NestJSDocs import (
    NEST_JS_CRUD_GEN,
+    NEST_JS_FIRST_STEPS,
    NEST_JS_MODULES,
    NEST_JS_SQL,
-    NEST_JS_FIRST_STEPS,
)


# This is a two part prompt!!!
REQUIREMENTS_INTO_MODULES = """
Human: Here is some background that is useful:
@@ -84,7 +83,7 @@
</think_anti>
<answer> <module> <name> name of the module </name> <description> a description of the module </description> <command> command to generate the module in the following format `nest g module cats` </command> </module> ... repeat for other modules </answer> <concluding_think> thoughts </concluding_think>
-Assistant:
+Assistant:
"""
# becomes modules
14 changes: 7 additions & 7 deletions codex/prompts/claude/requirements/TaskIntoClarifcations.py
@@ -14,15 +14,15 @@
Only reply with the specified tags.
-Assistant:
+Assistant:
"""

FRONTEND_QUESTION = "Do we need a front end?"

MORE_INFO_BASE_CLARIFICATIONS_FRONTEND = """
Human: You are an expert product manager who works to help us ensure our projects are reasonable. You should carefully think about it step by step, and share those answers in a block set off by <think>thoughts here</think>. You should answer with only a yes or no. Set the answer off by <answer>Yes/No</answer>
-Do we need a front end for this:
+Do we need a front end for this:
"{project_description}"
Assistant: <think>
@@ -41,7 +41,7 @@
Assistant: Okay, ask away.
-Human: You are an expert product manager who works to help us ensure our projects are reasonable. You should carefully think about it step by step, and share those answers in a block set off by <think>thoughts here</think>. You should answer with only an expected user persona or personas. Set the answer off by <answer>reply here</answer>.
+Human: You are an expert product manager who works to help us ensure our projects are reasonable. You should carefully think about it step by step, and share those answers in a block set off by <think>thoughts here</think>. You should answer with only an expected user persona or personas. Set the answer off by <answer>reply here</answer>.
Who is the expected user of this: "{project_description}"
@@ -57,9 +57,9 @@
Assistant: Okay, ask away.
-Human: You are an expert product manager who works to help us ensure our projects are reasonable. You should carefully think about it step by step, and share those answers in a block set off by <think>thoughts here</think>. You should answer with only an expected skill level of the user persona or personas. Set the answer off by <answer>reply here</answer>.
+Human: You are an expert product manager who works to help us ensure our projects are reasonable. You should carefully think about it step by step, and share those answers in a block set off by <think>thoughts here</think>. You should answer with only an expected skill level of the user persona or personas. Set the answer off by <answer>reply here</answer>.
-What is the skill level of the expected user of this:
+What is the skill level of the expected user of this:
"{project_description}"
Assistant: <think>
@@ -72,12 +72,12 @@
Assistant: Okay, ask away.
-Human:
+Human:
Answer as an expert product owner.
For the following project description, ask a series of at least 10 questions that will clarify what is being requested.
Think carefully, ensuring questions do not overlap, and all ambiguities are covered. Then, generate your best guess at the answer, basing it on your knowledge and best practices. You should carefully think about it step by step and share those thoughts in a block set off by <think>thoughts here</think>. For each reply, you should set the answer off by <answer>reply here</answer>. Pick questions that directly affect the direction of the product rather than marketing, budgets, or timeframes, stakeholders etc as those are already answered in the Q&A.
-An example of that would be
+An example of that would be
<think>your thoughts on the overall process, thinking step by step</think>
<answer>
<wrapper>
40 changes: 18 additions & 22 deletions codex/requirements/agent.py
@@ -1,11 +1,21 @@
-from asyncio import run
import logging
+from asyncio import run

import openai
import prisma
from prisma.enums import AccessLevel

from codex.common.ai_block import Indentifiers
+from codex.prompts.claude.requirements.AskFunction import *
+from codex.prompts.claude.requirements.ClarificationsIntoProduct import *
+from codex.prompts.claude.requirements.EndpointGeneration import *
+from codex.prompts.claude.requirements.ModuleIntoDatabase import *
+from codex.prompts.claude.requirements.ModuleRefinement import *
+from codex.prompts.claude.requirements.ProductIntoRequirement import *
+from codex.prompts.claude.requirements.QAFormat import *
+from codex.prompts.claude.requirements.RequirementIntoModule import *
+from codex.prompts.claude.requirements.SearchFunction import *
+from codex.prompts.claude.requirements.TaskIntoClarifcations import *
from codex.requirements import flatten_endpoints
from codex.requirements.ai_clarify import (
    FrontendClarificationBlock,
@@ -14,6 +24,10 @@
    UserSkillClarificationBlock,
)
from codex.requirements.ai_feature import FeatureGenerationBlock
+from codex.requirements.build_requirements_refinement_object import (
+    RequirementsRefined,
+    convert_requirements,
+)
from codex.requirements.complete import complete_and_parse, complete_anth
from codex.requirements.database import create_spec
from codex.requirements.gather_task_info import gather_task_info_loop
@@ -44,26 +58,8 @@
    ResponseModel,
    StateObj,
)

-from codex.prompts.claude.requirements.ClarificationsIntoProduct import *
-from codex.prompts.claude.requirements.ProductIntoRequirement import *
-from codex.prompts.claude.requirements.RequirementIntoModule import *
-from codex.prompts.claude.requirements.TaskIntoClarifcations import *
-from codex.prompts.claude.requirements.ModuleRefinement import *
-from codex.prompts.claude.requirements.ModuleIntoDatabase import *
-from codex.prompts.claude.requirements.EndpointGeneration import *
-from codex.prompts.claude.requirements.AskFunction import *
-from codex.prompts.claude.requirements.QAFormat import *
-from codex.prompts.claude.requirements.SearchFunction import *
-from codex.requirements.build_requirements_refinement_object import (
-    convert_requirements,
-    RequirementsRefined,
-)
from codex.requirements.parser import parse
-from codex.requirements.unwrap_schemas import (
-    convert_endpoint,
-    unwrap_db_schema,
-)
+from codex.requirements.unwrap_schemas import convert_endpoint, unwrap_db_schema

logger = logging.getLogger(__name__)

@@ -421,11 +417,11 @@ async def populate_database_specs():

oai = openai.OpenAI()

-task = """The Tutor App is an app designed for tutors to manage their clients, schedules, and invoices.
+task = """The Tutor App is an app designed for tutors to manage their clients, schedules, and invoices.
It must support both the client and tutor scheduling, rescheduling and canceling appointments, and sending invoices after the appointment has passed.
-Clients can sign up with OAuth2 or with traditional sign-in authentication. If they sign up with traditional authentication, it must be safe and secure. There will need to be password reset and login capabilities.
+Clients can sign up with OAuth2 or with traditional sign-in authentication. If they sign up with traditional authentication, it must be safe and secure. There will need to be password reset and login capabilities.
There will need to be authorization for identifying clients vs the tutor.
8 changes: 6 additions & 2 deletions codex/requirements/ai_clarify.py
@@ -3,8 +3,12 @@

from pydantic import BaseModel

-from codex.common.ai_block import Indentifiers
-from codex.common.ai_block import AIBlock, ValidatedResponse, ValidationError
+from codex.common.ai_block import (
+    AIBlock,
+    Indentifiers,
+    ValidatedResponse,
+    ValidationError,
+)
from codex.requirements.model import Clarification, QandA, QandAResponses


8 changes: 6 additions & 2 deletions codex/requirements/ai_feature.py
@@ -3,8 +3,12 @@

from pydantic import BaseModel

-from codex.common.ai_block import Indentifiers
-from codex.common.ai_block import AIBlock, ValidatedResponse, ValidationError
+from codex.common.ai_block import (
+    AIBlock,
+    Indentifiers,
+    ValidatedResponse,
+    ValidationError,
+)
from codex.requirements.model import (
    Clarification,
    FeaturesSuperObject,
7 changes: 3 additions & 4 deletions codex/requirements/build_requirements_refinement_object.py
@@ -1,11 +1,10 @@
+import enum
from typing import List

+from fuzzywuzzy import fuzz, process
-from pydantic import BaseModel
+from pydantic import BaseModel
-import enum

from codex.requirements.model import ReplyEnum, RequirementsRefined
-from fuzzywuzzy import fuzz
-from fuzzywuzzy import process


def convert_requirements(requirements_qa) -> RequirementsRefined:
1 change: 1 addition & 0 deletions codex/requirements/choose_tool.py
@@ -1,5 +1,6 @@
# Tools
from typing import Callable, Optional

from codex.requirements.complete import complete_anth


3 changes: 2 additions & 1 deletion codex/requirements/complete.py
@@ -1,7 +1,8 @@
# Anthropic Completion
from enum import Enum
from typing import Type, TypeVar
-from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT

+from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic
from anthropic.types import Completion as ACompletion
from pydantic import BaseModel, ValidationError

9 changes: 2 additions & 7 deletions codex/requirements/flatten_endpoints.py
@@ -1,11 +1,6 @@
-from fuzzywuzzy import fuzz
-from fuzzywuzzy import process
+from fuzzywuzzy import fuzz, process

-from codex.requirements.model import (
-    Endpoint,
-    EndpointGroupWrapper,
-    EndpointWrapper,
-)
+from codex.requirements.model import Endpoint, EndpointGroupWrapper, EndpointWrapper


def flatten_endpoints(
8 changes: 4 additions & 4 deletions codex/requirements/gather_task_info.py
@@ -1,9 +1,9 @@
# Task Breakdown Micro Agent
from typing import Callable, Optional

from anthropic import AI_PROMPT, HUMAN_PROMPT
-from codex.prompts.claude.requirements.TaskIntoClarifcations import (
-    TASK_INTO_MORE_INFO,
-)

+from codex.prompts.claude.requirements.TaskIntoClarifcations import TASK_INTO_MORE_INFO
from codex.requirements.choose_tool import choose_tool
from codex.requirements.complete import complete_anth

@@ -17,7 +17,7 @@ def gather_task_info_loop(
print(x)
response = complete_anth(running_message)
print(response)
-if not "finished:" in response.strip():
+if "finished:" not in response.strip():
next_message = choose_tool(raw_prompt=response, ask_callback=ask_callback)
print(next_message)
running_message += response + HUMAN_PROMPT + next_message + AI_PROMPT
2 changes: 1 addition & 1 deletion codex/requirements/matching.py
@@ -1,4 +1,4 @@
-from fuzzywuzzy import process, fuzz
+from fuzzywuzzy import fuzz, process


def find_best_match(target: str, choices: list[str], threshold: int = 80):
2 changes: 1 addition & 1 deletion codex/requirements/model.py
@@ -1,5 +1,5 @@
-from dataclasses import dataclass
import enum
+from dataclasses import dataclass
from typing import List, Literal, Optional

from prisma.enums import AccessLevel
3 changes: 2 additions & 1 deletion codex/requirements/parser.py
@@ -1,6 +1,7 @@
from typing import Type, TypeVar
-from gravitasml.token import tokenize, Token

from gravitasml.parser import Parser
+from gravitasml.token import Token, tokenize
from pydantic import BaseModel

T = TypeVar("T", bound=BaseModel)