refactor(config): make a single source of truth file (All-Hands-AI#524)
* refactor

* fix nits

* add get from env

* refactor logic
yufansong committed Apr 2, 2024
1 parent 1af287a commit 324a00f
Showing 6 changed files with 45 additions and 21 deletions.
4 changes: 2 additions & 2 deletions agenthub/monologue_agent/utils/memory.py
@@ -7,15 +7,15 @@
 from opendevin import config
 from . import json

-embedding_strategy = config.get_or_default("LLM_EMBEDDING_MODEL", "local")
+embedding_strategy = config.get("LLM_EMBEDDING_MODEL")

 # TODO: More embeddings: https://docs.llamaindex.ai/en/stable/examples/embeddings/OpenAI/
 # There's probably a more programmatic way to do this.
 if embedding_strategy == "llama2":
     from llama_index.embeddings.ollama import OllamaEmbedding
     embed_model = OllamaEmbedding(
         model_name="llama2",
-        base_url=config.get_or_default("LLM_BASE_URL", "http://localhost:8000"),
+        base_url=config.get_or_error("LLM_BASE_URL"),
         ollama_additional_kwargs={"mirostat": 0},
     )
 elif embedding_strategy == "openai":
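
Note on this hunk: the call-site default ("local") did not disappear, it moved into DEFAULT_CONFIG in opendevin/config.py (next file), so an unconfigured install should still resolve to the local embedding strategy. A minimal sketch of the resulting lookup, assuming opendevin is importable and nothing overrides the key:

from opendevin import config

# The "local" fallback now lives in DEFAULT_CONFIG rather than at this
# call site, so the resolved value is unchanged for a default install.
strategy = config.get("LLM_EMBEDDING_MODEL")
print(strategy)  # "local" unless overridden via an env var or config.toml
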
29 changes: 28 additions & 1 deletion opendevin/config.py
@@ -5,12 +5,33 @@

 load_dotenv()

+DEFAULT_CONFIG = {
+    "LLM_API_KEY": None,
+    "LLM_BASE_URL": None,
+    "WORKSPACE_DIR": os.path.join(os.getcwd(), "workspace"),
+    "LLM_MODEL": "gpt-4-0125-preview",
+    "SANDBOX_CONTAINER_IMAGE": "ghcr.io/opendevin/sandbox",
+    "RUN_AS_DEVIN": "false",
+    "LLM_EMBEDDING_MODEL": "local",
+    "LLM_NUM_RETRIES": 6,
+    "LLM_COOLDOWN_TIME" : 1,
+    "DIRECTORY_REWRITE" : "",
+    "PROMPT_DEBUG_DIR": "",
+}
+
 config_str = ""
 if os.path.exists("config.toml"):
     with open("config.toml", "rb") as f:
         config_str = f.read().decode("utf-8")

-config = toml.loads(config_str)
+tomlConfig = toml.loads(config_str)
+config = DEFAULT_CONFIG.copy()
+for key, value in config.items():
+    if key in os.environ:
+        config[key] = os.environ[key]
+    elif key in tomlConfig:
+        config[key] = tomlConfig[key]


 def _get(key: str, default):
     value = config.get(key, default)

@@ -38,3 +59,9 @@ def get_or_none(key: str):
     Get a key from the config, or return None if it doesn't exist.
     """
     return _get(key, None)
+
+def get(key: str):
+    """
+    Get a key from the config, please make sure it exists.
+    """
+    return config.get(key)
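
The net effect of the loop above is a three-tier lookup: an environment variable beats a config.toml entry, which beats the hard-coded default; and since the new get() is a plain dict.get, a key missing from all three resolves to None. A standalone sketch of that precedence, using hypothetical toml and env values rather than the real files:

import os

# Hypothetical overrides standing in for config.toml and the shell env.
DEFAULT_CONFIG = {"LLM_MODEL": "gpt-4-0125-preview", "LLM_EMBEDDING_MODEL": "local"}
toml_config = {"LLM_MODEL": "gpt-4"}            # as if set in config.toml
os.environ["LLM_EMBEDDING_MODEL"] = "openai"    # as if exported by the user

# Same resolution order as opendevin/config.py: env > toml > default.
config = DEFAULT_CONFIG.copy()
for key in config:
    if key in os.environ:
        config[key] = os.environ[key]
    elif key in toml_config:
        config[key] = toml_config[key]

print(config["LLM_MODEL"])            # "gpt-4": config.toml beats the default
print(config["LLM_EMBEDDING_MODEL"])  # "openai": the env var beats both
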
12 changes: 6 additions & 6 deletions opendevin/llm/llm.py
@@ -6,12 +6,12 @@

 from opendevin import config

-DEFAULT_MODEL_NAME = config.get_or_default("LLM_MODEL", "gpt-4-0125-preview")
-DEFAULT_API_KEY = config.get_or_none("LLM_API_KEY")
-DEFAULT_BASE_URL = config.get_or_none("LLM_BASE_URL")
-DEFAULT_LLM_NUM_RETRIES = config.get_or_default("LLM_NUM_RETRIES", 6)
-DEFAULT_LLM_COOLDOWN_TIME = config.get_or_default("LLM_COOLDOWN_TIME", 1)
-PROMPT_DEBUG_DIR = config.get_or_default("PROMPT_DEBUG_DIR", "")
+DEFAULT_API_KEY = config.get("LLM_API_KEY")
+DEFAULT_BASE_URL = config.get("LLM_BASE_URL")
+DEFAULT_MODEL_NAME = config.get("LLM_MODEL")
+DEFAULT_LLM_NUM_RETRIES = config.get("LLM_NUM_RETRIES")
+DEFAULT_LLM_COOLDOWN_TIME = config.get("LLM_COOLDOWN_TIME")
+PROMPT_DEBUG_DIR = config.get("PROMPT_DEBUG_DIR")

 class LLM:
     def __init__(self,

2 changes: 1 addition & 1 deletion opendevin/main.py
@@ -25,7 +25,7 @@ def parse_arguments():
     parser.add_argument("-t", "--task", type=str, default="", help="The task for the agent to perform")
     parser.add_argument("-f", "--file", type=str, help="Path to a file containing the task. Overrides -t if both are provided.")
     parser.add_argument("-c", "--agent-cls", default="MonologueAgent", type=str, help="The agent class to use")
-    parser.add_argument("-m", "--model-name", default=config.get_or_default("LLM_MODEL", "gpt-4-0125-preview"), type=str, help="The (litellm) model name to use")
+    parser.add_argument("-m", "--model-name", default=config.get("LLM_MODEL"), type=str, help="The (litellm) model name to use")
     parser.add_argument("-i", "--max-iterations", default=100, type=int, help="The maximum number of iterations to run the agent")
     return parser.parse_args()

9 changes: 3 additions & 6 deletions opendevin/sandbox/sandbox.py
@@ -15,15 +15,12 @@
 InputType = namedtuple("InputType", ["content"])
 OutputType = namedtuple("OutputType", ["content"])

-DIRECTORY_REWRITE = config.get_or_default(
-    "DIRECTORY_REWRITE", ""
-)  # helpful for docker-in-docker scenarios
-
-CONTAINER_IMAGE = config.get_or_default("SANDBOX_CONTAINER_IMAGE", "ghcr.io/opendevin/sandbox")
+DIRECTORY_REWRITE = config.get("DIRECTORY_REWRITE")  # helpful for docker-in-docker scenarios
+CONTAINER_IMAGE = config.get("SANDBOX_CONTAINER_IMAGE")

 # FIXME: On some containers, the devin user doesn't have enough permission, e.g. to install packages
 # How do we make this more flexible?
-RUN_AS_DEVIN = config.get_or_default("RUN_AS_DEVIN", "false").lower() != "false"
+RUN_AS_DEVIN = config.get("RUN_AS_DEVIN").lower() != "false"
 USER_ID = 1000
 if config.get_or_none("SANDBOX_USER_ID") is not None:
     USER_ID = int(config.get_or_default("SANDBOX_USER_ID", ""))
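
The new config.get("RUN_AS_DEVIN") relies on DEFAULT_CONFIG always supplying a string here; if the key were ever absent, .lower() would raise on None. As written, the flag is enabled by any value other than "false", case-insensitively. A small sketch of the parse (parse_run_as_devin is a hypothetical helper, not part of the codebase):

def parse_run_as_devin(raw: str) -> bool:
    # Mirrors the expression above: anything but "false" enables the flag.
    return raw.lower() != "false"

print(parse_run_as_devin("false"))  # False -- the DEFAULT_CONFIG value
print(parse_run_as_devin("FALSE"))  # False -- comparison is case-insensitive
print(parse_run_as_devin("true"))   # True
print(parse_run_as_devin("0"))      # True -- only the literal "false" disables
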
10 changes: 5 additions & 5 deletions opendevin/server/session.py
@@ -15,11 +15,11 @@
 from opendevin.llm.llm import LLM
 from opendevin.observation import Observation, UserMessageObservation

-DEFAULT_API_KEY = config.get_or_none("LLM_API_KEY")
-DEFAULT_BASE_URL = config.get_or_none("LLM_BASE_URL")
-DEFAULT_WORKSPACE_DIR = config.get_or_default("WORKSPACE_DIR", os.path.join(os.getcwd(), "workspace"))
-LLM_MODEL = config.get_or_default("LLM_MODEL", "gpt-4-0125-preview")
-CONTAINER_IMAGE = config.get_or_default("SANDBOX_CONTAINER_IMAGE", "ghcr.io/opendevin/sandbox")
+DEFAULT_API_KEY = config.get("LLM_API_KEY")
+DEFAULT_BASE_URL = config.get("LLM_BASE_URL")
+DEFAULT_WORKSPACE_DIR = config.get("WORKSPACE_DIR")
+LLM_MODEL = config.get("LLM_MODEL")
+CONTAINER_IMAGE = config.get("SANDBOX_CONTAINER_IMAGE")

 class Session:
     """Represents a session with an agent.
