Skip to content

Commit

Permalink
feat: ability to configure temperature and top-p sampling for llm generation (All-Hands-AI#1556)
Browse files Browse the repository at this point in the history

Co-authored-by: Jim Su <jimsu@protonmail.com>
  • Loading branch information
Jiayi-Pan and yimothysu committed May 3, 2024
1 parent b2a99b5 commit bccb829
Show file tree
Hide file tree
Showing 3 changed files with 10 additions and 0 deletions.
2 changes: 2 additions & 0 deletions opendevin/core/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,8 @@
ConfigType.AGENT_MEMORY_ENABLED: False,
ConfigType.LLM_TIMEOUT: None,
ConfigType.LLM_MAX_RETURN_TOKENS: None,
ConfigType.LLM_TEMPERATURE: None,
ConfigType.LLM_TOP_P: None,
# GPT-4 pricing is $10 per 1M input tokens. Since tokenization happens on LLM side,
# we cannot easily count number of tokens, but we can count characters.
# Assuming 5 characters per token, 5 million is a reasonable default limit.
Expand Down
2 changes: 2 additions & 0 deletions opendevin/core/schema/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@


class ConfigType(str, Enum):
LLM_TOP_P = 'LLM_TOP_P'
LLM_TEMPERATURE = 'LLM_TEMPERATURE'
LLM_MAX_RETURN_TOKENS = 'LLM_MAX_RETURN_TOKENS'
LLM_TIMEOUT = 'LLM_TIMEOUT'
LLM_API_KEY = 'LLM_API_KEY'
Expand Down
6 changes: 6 additions & 0 deletions opendevin/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@
LLM_RETRY_MAX_WAIT = config.get(ConfigType.LLM_RETRY_MAX_WAIT)
LLM_TIMEOUT = config.get(ConfigType.LLM_TIMEOUT)
LLM_MAX_RETURN_TOKENS = config.get(ConfigType.LLM_MAX_RETURN_TOKENS)
LLM_TEMPERATURE = config.get(ConfigType.LLM_TEMPERATURE)
LLM_TOP_P = config.get(ConfigType.LLM_TOP_P)


class LLM:
Expand All @@ -45,6 +47,8 @@ def __init__(
retry_max_wait=LLM_RETRY_MAX_WAIT,
llm_timeout=LLM_TIMEOUT,
llm_max_return_tokens=LLM_MAX_RETURN_TOKENS,
llm_temperature=LLM_TEMPERATURE,
llm_top_p=LLM_TOP_P,
):
"""
Args:
Expand Down Expand Up @@ -80,6 +84,8 @@ def __init__(
api_version=self.api_version,
max_tokens=self.llm_max_return_tokens,
timeout=self.llm_timeout,
temperature=llm_temperature,
top_p=llm_top_p,
)

completion_unwrapped = self._completion
Expand Down

0 comments on commit bccb829

Please sign in to comment.