From 24428e95ffedda211e437bce79548f6d96543831 Mon Sep 17 00:00:00 2001
From: Antony Repin <53556648+lehcode@users.noreply.github.com>
Date: Sun, 14 Apr 2024 18:41:08 +0300
Subject: [PATCH 01/11] Merge develop
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add ollama, support, memGPT services

* feat: Docker services

hotfix: Add OS locales
hotfix: Fix start configuration
hotfix: Temp fix of build errors
feat: Configure OpenDevin UI container
hotfix: Run OpenDevin application container
chore: Update README
feat: Docker Configuration for backend services
feat: Ubuntu 20.04 images with CUDA and Miniconda3
refactor: Update docker environment configuration
feat: Decouple UI and Python app into services.

Co-authored-by: Jim Su

* hotfix: Restore useTranslation()

* hotfix: Frontend integration

* hotfix: App Conda environment fix

* hotfix: Backend app service dependencies fix under Conda

* feat: Add API startup script

hotfix: Integration fix

* feat: Designate build directory

* feat: Add FastAPI server and Vite dev server logging for debug and live modes

* chore: Cleanup after local rebase

* feat: Improve docker compose services integration

* chore: Remove unnecessary @ts-expect-error

* hotfix: Frontend and API integration. Build improvements.

* feat/poetry-build (#8)

* refactor: Remove unnecessary code

* refactor: Update devin hostname to 'devin'

* refactor: Update Makefile to use Docker compose

---------

Co-authored-by: Jim Su

Merge develop (#12)

* fix: fix some of the styling to more closely match figma (#927)

* fix: fix some of the styling to more closely match figma

* overflow

* Add Italian, Spanish and Português (#1017)

* Update index.ts

Add Italian, Spanish and Português

* Update translation.json

Add Italian, Spanish and Português

* Remove unnecessary i18n initialization arguments

---------

Co-authored-by: Jim Su

* Add Azure configuration doc (#1035)

* Add Azure configuration doc

* Add link to Azure doc.

* Formatting AZURE_LLM_GUIDE (#1046)

* Feat add agent manager (#904)

* feat: add agent manager to manage all agents;

* extract the host ssh port to prevent conflict.
* clean all containers with prefix sandbox-

* merge from upstream/main

* merge from upstream/main

* Update frontend/src/state/settingsSlice.ts

* Update opendevin/sandbox/ssh_box.py

* Update opendevin/sandbox/exec_box.py

---------

Co-authored-by: Robert Brennan

* simplified get (#962)

* simplified get

* resolved merge conflicts

* removed default param for get

* Update opendevin/config.py

---------

Co-authored-by: Robert Brennan

* Response recognition for weak llms (#523)

* Tweak for weak llms

* Update to the latest commits

* Update to the latest commits

* Fix lint errors

* Remove merge artifact

---------

Co-authored-by: Jim Su

* Traffic Control: Add new config MAX_CHARS (#1015)

* Add new config MAX_CHARS

* Fix mypy linting issues

* fix: print the wrong ssh port number (#1054)

* fix(editor): ui enhancements and code refactor (#1069)

* Add new sandbox type - local (#1029)

* Auto-close stale issues and PRs (#1032)

* stale issues

* Update .github/workflows/stale.yml

Co-authored-by: Boxuan Li

* Update .github/workflows/stale.yml

Co-authored-by: Boxuan Li

* Update .github/workflows/stale.yml

Co-authored-by: Boxuan Li

* Update .github/workflows/stale.yml

Co-authored-by: Boxuan Li

---------

Co-authored-by: Boxuan Li
Co-authored-by: Graham Neubig

* Throw error if an illegal sandbox type is used (#1087)

* Unify linter behaviour across CI and pre-commit-hook (#1071)

* CI: Add autopep8 linter

Currently, we have autopep8 as part of the pre-commit hook. To ensure
consistent behaviour, we should have it in CI as well. Moreover, the
pre-commit hook contains a double-quote-string-fixer hook which changes
all double quotes to single quotes, but I do observe some PRs with
massive changes that go the opposite way. I suspect that these authors
1) disable or circumvent the pre-commit hook, and 2) have other linters
such as black in their IDE, which automatically change all single quotes
to double quotes. This has caused a lot of unnecessary diff, made review
really hard, and led to a lot of conflicts.

* Use -diff for autopep8

* autopep8: Freeze version in CI

* Ultimate fix

* Remove pep8 long line disable workaround

* Fix lint.yml

* Fix all files under opendevin and agenthub

* Revamp Exception handling (#1080)

* Revamp exception handling

* Agent controller: sleep 3 seconds if APIConnection error

* Fix AuthenticationError capture

* Revert unrelated style fixes

* Add type enforcement for action_from_dict call

* Add ollama, support, memGPT services

* feat: Docker services

hotfix: Add OS locales
hotfix: Fix start configuration
hotfix: Temp fix of build errors
feat: Configure OpenDevin UI container
hotfix: Run OpenDevin application container
chore: Update README
feat: Docker Configuration for backend services
feat: Ubuntu 20.04 images with CUDA and Miniconda3
refactor: Update docker environment configuration
feat: Decouple UI and Python app into services.
Co-authored-by: Jim Su * hotfix: Restore useTranslation() * hotfix: Frontend integration * hotfix: App Conda environment fix * hotfix: Backend app service dependencies fix under Conda * feat: Add API startup script hotfix: Integration fix * feat: Designate build directory * feat: Add FastAPI server and Vite dev server logging for debug and live modes * chore: Cleanup after local rebase * feat: Improve docker compose services integration * chore: Remove unnecessary @ts-expect-error * doc: Add supplementary notes for WSL2 users to Local LLM Guide (#1031) * Add supplementary notes for WSL2 users * Add supplementary notes for WSL2 users --------- Co-authored-by: Robert Brennan * added to sudo group (#1091) * hotfix: Frontend and API integration. Build improvements. * feat/poetry-build (#8) * refactor: Remove unnecessary code * refactor: Update devin hostname to 'devin' * refactor: Update Makefile to use Docker compose --------- Co-authored-by: Alex Bäuerle Co-authored-by: PierrunoYT <95778421+PierrunoYT@users.noreply.github.com> Co-authored-by: Jim Su Co-authored-by: Engel Nyst Co-authored-by: Leo Co-authored-by: Robert Brennan Co-authored-by: மனோஜ்குமார் பழனிச்சாமி Co-authored-by: namtacs <95915765+namtacs@users.noreply.github.com> Co-authored-by: Boxuan Li Co-authored-by: Akki Co-authored-by: RaGe Co-authored-by: Graham Neubig Co-authored-by: Z <35617149+FZFR@users.noreply.github.com> --- .dockerignore | 3 - .env | 28 +-- .env.dist | 3 +- .github/workflows/lint.yml | 12 +- .github/workflows/stale.yml | 29 +++ Makefile | 97 +++------- README.md | 16 +- agenthub/__init__.py | 6 +- agenthub/codeact_agent/README.md | 4 +- agenthub/codeact_agent/__init__.py | 2 +- agenthub/codeact_agent/codeact_agent.py | 51 +++--- agenthub/monologue_agent/TODO.md | 1 - agenthub/monologue_agent/__init__.py | 2 +- agenthub/monologue_agent/agent.py | 117 ++++++------ agenthub/monologue_agent/utils/memory.py | 64 +++---- agenthub/monologue_agent/utils/monologue.py | 10 +- agenthub/monologue_agent/utils/prompts.py | 55 ++++-- agenthub/planner_agent/__init__.py | 2 +- agenthub/planner_agent/agent.py | 7 +- agenthub/planner_agent/prompt.py | 50 ++--- docker-compose.yml | 58 +++--- docker/devin/app/.condarc | 8 + docker/devin/app/Dockerfile | 144 ++++++++------- docker/devin/app/configure.py | 20 -- docker/devin/app/devin_up.py | 7 +- docker/devin/app/entrypoint.sh | 29 +-- docker/devin/ui/entrypoint.sh | 23 --- docker/devin/{ui => web_ui}/.env.dist | 0 docker/devin/{ui => web_ui}/Dockerfile | 45 +++-- docker/devin/web_ui/entrypoint.sh | 23 +++ docker/env_debug.sh | 12 -- docker/nginx/nginx.conf | 2 +- docker/python_debug.sh | 14 ++ docs/documentation/AZURE_LLM_GUIDE.md | 42 +++++ docs/documentation/LOCAL_LLM_GUIDE.md | 26 +++ frontend/.eslintrc | 2 + frontend/src/App.test.tsx | 1 - frontend/src/components/ChatInterface.tsx | 6 +- frontend/src/components/CodeEditor.tsx | 49 +++-- frontend/src/components/Files.tsx | 152 ++++++++------- frontend/src/components/IconButton.tsx | 28 +++ frontend/src/components/Input.tsx | 13 +- frontend/src/components/Terminal.tsx | 10 +- frontend/src/components/Workspace.tsx | 44 ++--- frontend/src/i18n/index.ts | 3 + frontend/src/i18n/translation.json | 90 +++++++-- frontend/src/index.css | 2 +- frontend/src/types/ConfigType.tsx | 1 + frontend/vite.config.js | 13 +- opendevin/README.md | 1 - opendevin/action/__init__.py | 36 ++-- opendevin/action/agent.py | 12 +- opendevin/action/base.py | 10 +- opendevin/action/bash.py | 8 +- opendevin/action/browse.py | 13 +- opendevin/action/fileop.py | 
14 +- opendevin/action/tasks.py | 4 +- opendevin/agent.py | 32 ++-- opendevin/config.py | 45 ++--- opendevin/controller/agent_controller.py | 65 ++++--- opendevin/controller/command_manager.py | 33 ++-- opendevin/exceptions.py | 63 +++++++ opendevin/files.py | 8 +- opendevin/logger.py | 8 +- opendevin/main.py | 58 +++--- opendevin/mock/listen.py | 8 +- opendevin/observation/__init__.py | 36 ++-- opendevin/observation/base.py | 16 +- opendevin/observation/browse.py | 2 +- opendevin/observation/error.py | 2 +- opendevin/observation/files.py | 4 +- opendevin/observation/message.py | 8 +- opendevin/observation/recall.py | 4 +- opendevin/observation/run.py | 2 +- opendevin/parse_commands.py | 32 ++-- opendevin/plan.py | 6 +- opendevin/sandbox/__init__.py | 2 + opendevin/sandbox/exec_box.py | 134 +++++++------- opendevin/sandbox/local_box.py | 73 ++++++++ opendevin/sandbox/sandbox.py | 6 +- opendevin/sandbox/ssh_box.py | 145 ++++++++------- opendevin/schema/action.py | 32 ++-- opendevin/schema/config.py | 2 + opendevin/schema/observation.py | 18 +- opendevin/server/agent/__init__.py | 4 +- opendevin/server/agent/agent.py | 185 +++++++++++++++++++ opendevin/server/agent/manager.py | 190 +++---------------- opendevin/server/auth/__init__.py | 2 +- opendevin/server/listen.py | 14 +- opendevin/server/session/__init__.py | 7 +- opendevin/server/session/manager.py | 34 ++-- opendevin/server/session/session.py | 5 +- opendevin/state.py | 9 +- opendevin/utils/__init__.py | 3 + opendevin/utils/system.py | 15 ++ pyproject.toml | 4 + requirements.txt | 193 -------------------- 97 files changed, 1644 insertions(+), 1389 deletions(-) create mode 100644 .github/workflows/stale.yml delete mode 100644 docker/devin/app/configure.py mode change 100755 => 100644 docker/devin/app/entrypoint.sh delete mode 100755 docker/devin/ui/entrypoint.sh rename docker/devin/{ui => web_ui}/.env.dist (100%) rename docker/devin/{ui => web_ui}/Dockerfile (69%) create mode 100755 docker/devin/web_ui/entrypoint.sh create mode 100644 docker/python_debug.sh create mode 100644 docs/documentation/AZURE_LLM_GUIDE.md create mode 100644 frontend/src/components/IconButton.tsx create mode 100644 opendevin/exceptions.py create mode 100644 opendevin/sandbox/local_box.py create mode 100644 opendevin/server/agent/agent.py create mode 100644 opendevin/utils/__init__.py create mode 100644 opendevin/utils/system.py delete mode 100644 requirements.txt diff --git a/.dockerignore b/.dockerignore index 04a3b0bcfb4e..693012223601 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,8 +1,5 @@ -.gitattributes .github .idea -.gitignore .ollama LICENSE -README.md **/docs diff --git a/.env b/.env index 055d8b519118..64e86c757cf6 100644 --- a/.env +++ b/.env @@ -7,14 +7,14 @@ UI_HTTP_PORT=4173 UI_HTTPS_PORT=8443 # OpenDevin API host, port DEVIN_HOST=172.28.0.222 -DEVIN_API_PORT=3080 +DEVIN_API_PORT=4488 # OpenDevin websockets port DEVIN_WS_PORT=3000 # # Default Ollama model -LITELLM_DEFAULT_MODEL=mistral +DEFAULT_CHAT_MODEL=mistral:7b # Default models -EMBEDDING_MODEL=llama2 +DEFAULT_EMBEDDINGS_MODEL=llama2 # # Redis ports at the HOST side REDIS_SERVER_PORT=16379 @@ -27,28 +27,29 @@ POSTGRES_DB=litellm_memory POSTGRES_HOST_PORT=15432 POSTGRES_CONTAINER_PORT=5432 # -# Directories -APP_DIR=/opt/opendevin/app +# Directories inside a container +APP_ROOT=/opt/opendevin/app +WORKSPACE_DIR=/opt/opendevin/workspace +CONDA_ROOT=/var/lib/miniconda # Path to ollama models directory at the host machine HOST_MODELS_DIR=/mnt/g/LLMs/ollama/models 
-WORKSPACE_DIR=/opt/opendevin/workspace
-PYTHONPATH=/opt/opendevin/app
 # Name of the container's Conda virtual environment
 VENV_NAME=od_env
 #
 # Toggle debug mode: empty value or 'yes'
 DEBUG=yes
-#
+# Secure mode: HTTPS, authorization etc.
+# In development
+SECURE_MODE=
 # NodeJS environment mode. 'development' or 'production'
 # Node's default is development if omitted
 NODE_ENV=development
-# Specify timezone if necessary
-TZ=Etc/UTC
 #
 # NVidia CUDA driver version.
 # Will download new image if changed
 CUDA_VERSION=12.4.0
-# NodeJS version
+#
+# NodeJS and NPM versions of the UI image
 NODE_VERSION=18.20.1
 NPM_VERSION=10.5.2
 NODE_OPTIONS=""
@@ -57,6 +58,9 @@ NODE_OPTIONS=""
 MITMPROXY_VERSION=10.2.4
 # mitmproxy directory INSIDE THE CONTAINER
 MITMPROXY_DIR=/home/mitmproxy/.mitmproxy
+#
+# Localization variables
 # App locale
 LANG=en_US.UTF-8
-
+# Specify timezone if necessary
+TZ=Etc/UTC
diff --git a/.env.dist b/.env.dist
index a0c0e5f120b7..e6eb06f1ef15 100644
--- a/.env.dist
+++ b/.env.dist
@@ -13,7 +13,7 @@ DEVIN_API_PORT=3080
 DEVIN_WS_PORT=3000
 #
 # Default Ollama model
-LITELLM_DEFAULT_MODEL=mistral
+LITELLM_DEFAULT_MODEL=mistral:70b
 # Default models
 EMBEDDING_MODEL=llama2
 #
@@ -57,4 +57,3 @@ NPM_VERSION=10.5.2
 MITMPROXY_VERSION=10.2.4
 # mitmproxy directory INSIDE THE CONTAINER
 MITMPROXY_DIR=/home/mitmproxy/.mitmproxy
-
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 80e4826b76df..59398cdc64f5 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -32,11 +32,7 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: 3.11
-      - name: Create mypy cache directory
-        run: mkdir -p .mypy_cache
-      - name: Install dependencies
-        run: pip install ruff mypy==1.9.0 types-PyYAML types-toml
-      - name: Run mypy
-        run: python -m mypy --install-types --non-interactive --config-file dev_config/python/mypy.ini opendevin/ agenthub/
-      - name: Run ruff
-        run: ruff check --config dev_config/python/ruff.toml opendevin/ agenthub/
+      - name: Install pre-commit
+        run: pip install pre-commit
+      - name: Run pre-commit hooks
+        run: pre-commit run --files opendevin/**/* agenthub/**/* --show-diff-on-failure --config ./dev_config/python/.pre-commit-config.yaml
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 000000000000..b7e48311e480
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,29 @@
+name: 'Close stale issues'
+on:
+  schedule:
+    - cron: '30 1 * * *'
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/stale@v9
+        with:
+          # Aggressively close issues that have been explicitly labeled `age-out`
+          any-of-labels: age-out
+          stale-issue-message: 'This issue is stale because it has been open for 7 days with no activity. Remove stale label or comment or this will be closed in 1 day.'
+          close-issue-message: 'This issue was closed because it has been stalled for over 7 days with no activity.'
+          stale-pr-message: 'This PR is stale because it has been open for 7 days with no activity. Remove stale label or comment or this will be closed in 1 day.'
+          close-pr-message: 'This PR was closed because it has been stalled for over 7 days with no activity.'
+          days-before-stale: 7
+          days-before-close: 1
+
+      - uses: actions/stale@v9
+        with:
+          # Be more lenient with other issues
+          stale-issue-message: 'This issue is stale because it has been open for 30 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
+          close-issue-message: 'This issue was closed because it has been stalled for over 30 days with no activity.'
+          stale-pr-message: 'This PR is stale because it has been open for 30 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
+          close-pr-message: 'This PR was closed because it has been stalled for over 30 days with no activity.'
+          days-before-stale: 30
+          days-before-close: 7
diff --git a/Makefile b/Makefile
index 26bcc614d461..72442e00907b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,14 +1,8 @@
 # Makefile for OpenDevin project
 
 # Variables
-DOCKER_IMAGE = ghcr.io/opendevin/sandbox
-BACKEND_PORT = 3000
-BACKEND_HOST = "127.0.0.1:$(BACKEND_PORT)"
-FRONTEND_PORT = 3001
-DEFAULT_WORKSPACE_DIR = "./workspace"
 DEFAULT_MODEL = "gpt-3.5-turbo-1106"
 CONFIG_FILE = config.toml
-PRECOMMIT_CONFIG_PATH = "./dev_config/python/.pre-commit-config.yaml"
 
 # ANSI color codes
 GREEN=\033[0;32m
@@ -21,44 +15,21 @@ RESET=\033[0m
 build:
 	@echo "$(GREEN)Building project...$(RESET)"
 	@$(MAKE) -s check-dependencies
-	@$(MAKE) -s pull-docker-image
-	@$(MAKE) -s install-python-dependencies
-	@$(MAKE) -s install-frontend-dependencies
-	@$(MAKE) -s install-precommit-hooks
+	@$(MAKE) -s docker-build
 	@echo "$(GREEN)Build completed successfully.$(RESET)"
 
 check-dependencies:
 	@echo "$(YELLOW)Checking dependencies...$(RESET)"
-	@$(MAKE) -s check-python
-	@$(MAKE) -s check-npm
-	@$(MAKE) -s check-docker
+	@$(MAKE) -s check-docker-compose
 	@$(MAKE) -s check-poetry
 	@echo "$(GREEN)Dependencies checked successfully.$(RESET)"
 
-check-python:
-	@echo "$(YELLOW)Checking Python installation...$(RESET)"
-	@if command -v python3 > /dev/null; then \
-		echo "$(BLUE)$(shell python3 --version) is already installed.$(RESET)"; \
-	else \
-		echo "$(RED)Python 3 is not installed. Please install Python 3 to continue.$(RESET)"; \
-		exit 1; \
-	fi
-
-check-npm:
-	@echo "$(YELLOW)Checking npm installation...$(RESET)"
-	@if command -v npm > /dev/null; then \
-		echo "$(BLUE)npm $(shell npm --version) is already installed.$(RESET)"; \
-	else \
-		echo "$(RED)npm is not installed. Please install Node.js to continue.$(RESET)"; \
-		exit 1; \
-	fi
-
-check-docker:
-	@echo "$(YELLOW)Checking Docker installation...$(RESET)"
+check-docker-compose:
+	@echo "$(YELLOW)Checking Docker Compose installation...$(RESET)"
 	@if command -v docker > /dev/null; then \
 		echo "$(BLUE)$(shell docker --version) is already installed.$(RESET)"; \
 	else \
-		echo "$(RED)Docker is not installed. Please install Docker to continue.$(RESET)"; \
+		echo "$(RED)Docker is not installed.\nPlease install Docker Desktop to continue.$(RESET)"; \
 		exit 1; \
 	fi
 
@@ -73,47 +44,35 @@ check-poetry:
 		exit 1; \
 	fi
 
-pull-docker-image:
-	@echo "$(YELLOW)Pulling Docker image...$(RESET)"
-	@docker pull $(DOCKER_IMAGE)
-	@echo "$(GREEN)Docker image pulled successfully.$(RESET)"
-
-install-python-dependencies:
-	@echo "$(GREEN)Installing Python dependencies...$(RESET)"
-	@if [ "$(shell uname)" = "Darwin" ]; then \
-		echo "$(BLUE)Installing `chroma-hnswlib`...$(RESET)"; \
-		export HNSWLIB_NO_NATIVE=1; \
-		poetry run pip install chroma-hnswlib; \
-	fi
-	@poetry install --without evaluation
-	@echo "$(GREEN)Python dependencies installed successfully.$(RESET)"
-
-install-frontend-dependencies:
-	@echo "$(YELLOW)Setting up frontend environment...$(RESET)"
-	@echo "$(YELLOW)Detect Node.js version...$(RESET)"
-	@cd frontend && node ./scripts/detect-node-version.js
-	@cd frontend && \
-		echo "$(BLUE)Installing frontend dependencies with npm...$(RESET)" && \
-		npm install && \
-		echo "$(BLUE)Running make-i18n with npm...$(RESET)" && \
-		npm run make-i18n
-	@echo "$(GREEN)Frontend dependencies installed successfully.$(RESET)"
-
-install-precommit-hooks:
-	@echo "$(YELLOW)Installing pre-commit hooks...$(RESET)"
-	@git config --unset-all core.hooksPath || true
-	@poetry run pre-commit install --config $(PRECOMMIT_CONFIG_PATH)
-	@echo "$(GREEN)Pre-commit hooks installed successfully.$(RESET)"
+docker-build:
+	@read -p "Run 'docker compose down'? [Y/n]: " run_down; \
+	if [ -n "$$run_down" ]; then docker compose down; fi
+	@echo "$(YELLOW)Building Docker images...$(RESET)"
+	@docker compose -f docker-compose.yml build --pull > /dev/null
+	@echo "$(GREEN)Docker images generated successfully.$(RESET)"
+
+docker-rebuild:
+	@echo "$(YELLOW)Force rebuilding Docker images...$(RESET)"
+	@read -p "Run 'docker compose down'? [Y/n]: " run_down; \
+	if [ -n "$$run_down" ]; then docker compose down; fi
+	@docker compose -f docker-compose.yml build --pull --no-cache
+	@echo "$(GREEN)Docker images updated successfully.$(RESET)"
+
+docker-start:
+	@echo "$(YELLOW)Starting Docker services...$(RESET)"
+	@docker compose up --build
+	@echo "$(GREEN)All Docker services started$(RESET)"
 
 # Start backend
 start-backend:
 	@echo "$(YELLOW)Starting backend...$(RESET)"
-	@poetry run uvicorn opendevin.server.listen:app --port $(BACKEND_PORT)
+	@docker compose up devin
 
 # Start frontend
 start-frontend:
 	@echo "$(YELLOW)Starting frontend...$(RESET)"
-	@cd frontend && BACKEND_HOST=$(BACKEND_HOST) FRONTEND_PORT=$(FRONTEND_PORT) npm run start
+	@docker compose up web_ui
 
 # Run the app
 run:
@@ -124,7 +83,7 @@ run:
 	fi
 	@mkdir -p logs
 	@echo "$(YELLOW)Starting backend server...$(RESET)"
-	@poetry run uvicorn opendevin.server.listen:app --port $(BACKEND_PORT) &
+	@$(MAKE) docker-start
 	@echo "$(YELLOW)Waiting for the backend to start...$(RESET)"
 	@until nc -z localhost $(BACKEND_PORT); do sleep 0.1; done
 	@echo "$(GREEN)Backend started successfully.$(RESET)"
@@ -182,4 +141,4 @@ help:
 	@echo " $(GREEN)help$(RESET) - Display this help message, providing information on available targets."
 # Phony targets
-.PHONY: build check-dependencies check-python check-npm check-docker check-poetry pull-docker-image install-python-dependencies install-frontend-dependencies install-precommit-hooks start-backend start-frontend run setup-config setup-config-prompts help
+.PHONY: build check-dependencies check-docker-compose check-poetry setup-config setup-config-prompts docker-build docker-rebuild docker-start start-backend start-frontend run help
diff --git a/README.md b/README.md
index 166336d2e4df..1f5e0d9808c9 100644
--- a/README.md
+++ b/README.md
@@ -150,16 +150,6 @@ To configure the LM of your choice, follow these steps:
    ```
    This command will prompt you to enter the LLM API key and model name, ensuring that OpenDevin is tailored to your specific needs.
-
-You can manually update the `config.toml` file located in the project's root directory. Here, you'll find the `LLM_API_KEY` and `LLM_MODEL_NAME` fields, where you can set the LM of your choosing.
-You can configure the LLM Embedding Model using `LLM_EMBEDDING_MODEL` key in `config.toml`.
-
-```bash config.toml
-LLM_API_KEY="sk-12345678"
-LLM_MODEL="mistral"
-LLM_EMBEDDING_MODEL="llama2"
-```
-
 **Note on Alternative Models:**
 Some alternative models may prove more challenging to tame than others. Fear not, brave adventurer! We shall soon unveil LLM-specific documentation to guide you on your quest. And if you've already mastered the art of wielding a model other than OpenAI's GPT, we encourage you to [share your setup instructions with us](https://github.com/OpenDevin/OpenDevin/issues/417).
@@ -167,7 +157,7 @@ For a full list of the LM providers and models available, please consult the [li
 There is also [documentation for running with local models using ollama](./docs/documentation/LOCAL_LLM_GUIDE.md).
 
-[Docker Compose Documentation](./docker/README.md)
+We are working on a [guide for running OpenDevin with Azure](./docs/documentation/AZURE_LLM_GUIDE.md).
 
 ### 4. Run the Application
 
@@ -187,7 +177,7 @@ There is also [documentation for running with local models using ollama](./docs/
    ```bash
    make start-frontend
    ```
- 
+
 ### 6. Help
 
 - **Get Some Help:** Need assistance or information on available targets and commands? The help command provides all the necessary guidance to ensure a smooth experience with OpenDevin.
@@ -234,7 +224,7 @@ For details, please check [this document](./CONTRIBUTING.md).
 
 ## 🤖 Join Our Community
 
-Now we have both Slack workspace for the collaboration on building OpenDevin and Discord server for discussion about anything related, e.g., this project, LLM, agent, etc. 
+Now we have both Slack workspace for the collaboration on building OpenDevin and Discord server for discussion about anything related, e.g., this project, LLM, agent, etc.
 
 * [Slack workspace](https://join.slack.com/t/opendevin/shared_invite/zt-2etftj1dd-X1fDL2PYIVpsmJZkqEYANw)
 * [Discord server](https://discord.gg/mBuDGRzzES)
diff --git a/agenthub/__init__.py b/agenthub/__init__.py
index 15db2e3cfaf7..0f4b939f627a 100644
--- a/agenthub/__init__.py
+++ b/agenthub/__init__.py
@@ -2,8 +2,8 @@
 load_dotenv()
 
 # Import agents after environment variables are loaded
-from . import monologue_agent # noqa: E402
-from . import codeact_agent # noqa: E402
-from . import planner_agent # noqa: E402
+from . import monologue_agent  # noqa: E402
+from . import codeact_agent  # noqa: E402
+from . 
import planner_agent # noqa: E402 __all__ = ['monologue_agent', 'codeact_agent', 'planner_agent'] diff --git a/agenthub/codeact_agent/README.md b/agenthub/codeact_agent/README.md index 3ca45e07e345..22b7ef6e5573 100644 --- a/agenthub/codeact_agent/README.md +++ b/agenthub/codeact_agent/README.md @@ -1,6 +1,6 @@ # CodeAct-based Agent Framework -This folder implements the [CodeAct idea](https://arxiv.org/abs/2402.13463) that relies on LLM to autonomously perform actions in a Bash shell. It requires more from the LLM itself: LLM needs to be capable enough to do all the stuff autonomously, instead of stuck in an infinite loop. +This folder implements the [CodeAct idea](https://arxiv.org/abs/2402.13463) that relies on LLM to autonomously perform actions in a Bash shell. It requires more from the LLM itself: LLM needs to be capable enough to do all the stuff autonomously, instead of stuck in an infinite loop. **NOTE: This agent is still highly experimental and under active development to reach the capability described in the original paper & [repo](https://github.com/xingyaoww/code-act).** @@ -18,6 +18,6 @@ Example: prompts `gpt-4-0125-preview` to write a flask server, install `flask` l image -Most of the things are working as expected, except at the end, the model did not follow the instruction to stop the interaction by outputting ` exit ` as instructed. +Most of the things are working as expected, except at the end, the model did not follow the instruction to stop the interaction by outputting ` exit ` as instructed. **TODO**: This should be fixable by either (1) including a complete in-context example like [this](https://github.com/xingyaoww/mint-bench/blob/main/mint/tasks/in_context_examples/reasoning/with_tool.txt), OR (2) collect some interaction data like this and fine-tune a model (like [this](https://github.com/xingyaoww/code-act), a more complex route). diff --git a/agenthub/codeact_agent/__init__.py b/agenthub/codeact_agent/__init__.py index cb427d10eb9f..a07c920950d9 100644 --- a/agenthub/codeact_agent/__init__.py +++ b/agenthub/codeact_agent/__init__.py @@ -1,4 +1,4 @@ from opendevin.agent import Agent from .codeact_agent import CodeActAgent -Agent.register("CodeActAgent", CodeActAgent) +Agent.register('CodeActAgent', CodeActAgent) diff --git a/agenthub/codeact_agent/codeact_agent.py b/agenthub/codeact_agent/codeact_agent.py index 96ab69298895..56bbb10bd0f9 100644 --- a/agenthub/codeact_agent/codeact_agent.py +++ b/agenthub/codeact_agent/codeact_agent.py @@ -24,7 +24,7 @@ {COMMAND_DOCS} """ if COMMAND_DOCS is not None - else "" + else '' ) SYSTEM_MESSAGE = f"""You are a helpful assistant. You will be provided access (as root) to a bash shell to complete user-provided tasks. You will be able to execute commands in the bash shell, interact with the file system, install packages, and receive the output of your commands. @@ -46,27 +46,29 @@ {COMMAND_SEGMENT} When you are done, execute the following to close the shell and end the conversation: -exit +exit """ INVALID_INPUT_MESSAGE = ( "I don't understand your input. \n" - "If you want to execute command, please use YOUR_COMMAND_HERE .\n" - "If you already completed the task, please exit the shell by generating: exit ." + 'If you want to execute command, please use YOUR_COMMAND_HERE .\n' + 'If you already completed the task, please exit the shell by generating: exit .' 
) + def parse_response(response) -> str: action = response.choices[0].message.content - if "" in action and "" not in action: - action += "" + if '' in action and '' not in action: + action += '' return action + class CodeActAgent(Agent): """ - The Code Act Agent is a minimalist agent. + The Code Act Agent is a minimalist agent. The agent works by passing the model a list of action-observation pairs and prompting the model to take the next step. """ - + def __init__( self, llm: LLM, @@ -82,7 +84,7 @@ def __init__( def step(self, state: State) -> Action: """ - Performs one step using the Code Act Agent. + Performs one step using the Code Act Agent. This includes gathering info on previous steps and prompting the model to make a command to execute. Parameters: @@ -97,42 +99,45 @@ def step(self, state: State) -> Action: """ if len(self.messages) == 0: - assert state.plan.main_goal, "Expecting instruction to be set" + assert state.plan.main_goal, 'Expecting instruction to be set' self.messages = [ - {"role": "system", "content": SYSTEM_MESSAGE}, - {"role": "user", "content": state.plan.main_goal}, + {'role': 'system', 'content': SYSTEM_MESSAGE}, + {'role': 'user', 'content': state.plan.main_goal}, ] updated_info = state.updated_info if updated_info: for prev_action, obs in updated_info: assert isinstance( prev_action, (CmdRunAction, AgentEchoAction) - ), "Expecting CmdRunAction or AgentEchoAction for Action" + ), 'Expecting CmdRunAction or AgentEchoAction for Action' if isinstance( obs, AgentMessageObservation ): # warning message from itself - self.messages.append({"role": "user", "content": obs.content}) + self.messages.append( + {'role': 'user', 'content': obs.content}) elif isinstance(obs, CmdOutputObservation): - content = "OBSERVATION:\n" + obs.content - content += f"\n[Command {obs.command_id} finished with exit code {obs.exit_code}]]" - self.messages.append({"role": "user", "content": content}) + content = 'OBSERVATION:\n' + obs.content + content += f'\n[Command {obs.command_id} finished with exit code {obs.exit_code}]]' + self.messages.append({'role': 'user', 'content': content}) else: raise NotImplementedError( - f"Unknown observation type: {obs.__class__}" + f'Unknown observation type: {obs.__class__}' ) response = self.llm.completion( messages=self.messages, - stop=[""], + stop=[''], temperature=0.0 ) action_str: str = parse_response(response) - self.messages.append({"role": "assistant", "content": action_str}) + state.num_of_chars += sum(len(message['content']) + for message in self.messages) + len(action_str) + self.messages.append({'role': 'assistant', 'content': action_str}) - command = re.search(r"(.*)", action_str, re.DOTALL) + command = re.search(r'(.*)', action_str, re.DOTALL) if command is not None: # a command was found command_group = command.group(1) - if command_group.strip() == "exit": + if command_group.strip() == 'exit': return AgentFinishAction() return CmdRunAction(command=command_group) # # execute the code @@ -149,4 +154,4 @@ def step(self, state: State) -> Action: ) # warning message to itself def search_memory(self, query: str) -> List[str]: - raise NotImplementedError("Implement this abstract method") + raise NotImplementedError('Implement this abstract method') diff --git a/agenthub/monologue_agent/TODO.md b/agenthub/monologue_agent/TODO.md index 1ed81b005a02..5f44db4bef06 100644 --- a/agenthub/monologue_agent/TODO.md +++ b/agenthub/monologue_agent/TODO.md @@ -6,4 +6,3 @@ There's a lot of low-hanging fruit for this agent: * Improve memory 
condensing--condense earlier memories more aggressively * Limit the time that `run` can wait (in case agent runs an interactive command and it's hanging) * Figure out how to run background processes, e.g. `node server.js` to start a server - diff --git a/agenthub/monologue_agent/__init__.py b/agenthub/monologue_agent/__init__.py index 5ab817ee53b1..1cfef46f1188 100644 --- a/agenthub/monologue_agent/__init__.py +++ b/agenthub/monologue_agent/__init__.py @@ -1,4 +1,4 @@ from opendevin.agent import Agent from .agent import MonologueAgent -Agent.register("MonologueAgent", MonologueAgent) +Agent.register('MonologueAgent', MonologueAgent) diff --git a/agenthub/monologue_agent/agent.py b/agenthub/monologue_agent/agent.py index f04a050a7d62..63f93a1e7cdd 100644 --- a/agenthub/monologue_agent/agent.py +++ b/agenthub/monologue_agent/agent.py @@ -3,6 +3,7 @@ from opendevin.state import State from opendevin.llm.llm import LLM from opendevin.schema import ActionType, ObservationType +from opendevin.exceptions import AgentNoInstructionError from opendevin.action import ( Action, @@ -32,46 +33,46 @@ MAX_OUTPUT_LENGTH = 5000 INITIAL_THOUGHTS = [ - "I exist!", - "Hmm...looks like I can type in a command line prompt", - "Looks like I have a web browser too!", + 'I exist!', + 'Hmm...looks like I can type in a command line prompt', + 'Looks like I have a web browser too!', "Here's what I want to do: $TASK", - "How am I going to get there though?", - "It seems like I have some kind of short term memory.", - "Each of my thoughts seems to be stored in a JSON array.", - "It seems whatever I say next will be added as an object to the list.", - "But no one has perfect short-term memory. My list of thoughts will be summarized and condensed over time, losing information in the process.", - "Fortunately I have long term memory!", - "I can just perform a recall action, followed by the thing I want to remember. And then related thoughts just spill out!", + 'How am I going to get there though?', + 'It seems like I have some kind of short term memory.', + 'Each of my thoughts seems to be stored in a JSON array.', + 'It seems whatever I say next will be added as an object to the list.', + 'But no one has perfect short-term memory. My list of thoughts will be summarized and condensed over time, losing information in the process.', + 'Fortunately I have long term memory!', + 'I can just perform a recall action, followed by the thing I want to remember. And then related thoughts just spill out!', "Sometimes they're random thoughts that don't really have to do with what I wanted to remember. But usually they're exactly what I need!", "Let's try it out!", - "RECALL what it is I want to do", + 'RECALL what it is I want to do', "Here's what I want to do: $TASK", - "How am I going to get there though?", + 'How am I going to get there though?', "Neat! And it looks like it's easy for me to use the command line too! I just have to perform a run action and include the command I want to run in the command argument. The command output just jumps into my head!", 'RUN echo "hello world"', - "hello world", - "Cool! I bet I can write files too using the write action.", + 'hello world', + 'Cool! I bet I can write files too using the write action.', "WRITE echo \"console.log('hello world')\" > test.js", - "", + '', "I just created test.js. 
I'll try and run it now.", - "RUN node test.js", - "hello world", - "It works!", + 'RUN node test.js', + 'hello world', + 'It works!', "I'm going to try reading it now using the read action.", - "READ test.js", + 'READ test.js', "console.log('hello world')", - "Nice! I can read files too!", - "And if I want to use the browser, I just need to use the browse action and include the url I want to visit in the url argument", + 'Nice! I can read files too!', + 'And if I want to use the browser, I just need to use the browse action and include the url I want to visit in the url argument', "Let's try that...", - "BROWSE google.com", + 'BROWSE google.com', '
', - "I can browse the web too!", - "And once I have completed my task, I can use the finish action to stop working.", + 'I can browse the web too!', + 'And once I have completed my task, I can use the finish action to stop working.', "But I should only use the finish action when I'm absolutely certain that I've completed my task and have tested my work.", - "Very cool. Now to accomplish my task.", + 'Very cool. Now to accomplish my task.', "I'll need a strategy. And as I make progress, I'll need to keep refining that strategy. I'll need to set goals, and break them into sub-goals.", - "In between actions, I must always take some time to think, strategize, and set new goals. I should never take two actions in a row.", + 'In between actions, I must always take some time to think, strategize, and set new goals. I should never take two actions in a row.', "OK so my task is to $TASK. I haven't made any progress yet. Where should I start?", "It seems like there might be an existing project here. I should probably start by running `ls` to see what's here.", ] @@ -106,15 +107,15 @@ def _add_event(self, event: dict): - event (dict): The event that will be added to monologue and memory """ - if "extras" in event and "screenshot" in event["extras"]: - del event["extras"]["screenshot"] + if 'extras' in event and 'screenshot' in event['extras']: + del event['extras']['screenshot'] if ( - "args" in event - and "output" in event["args"] - and len(event["args"]["output"]) > MAX_OUTPUT_LENGTH + 'args' in event + and 'output' in event['args'] + and len(event['args']['output']) > MAX_OUTPUT_LENGTH ): - event["args"]["output"] = ( - event["args"]["output"][:MAX_OUTPUT_LENGTH] + "..." + event['args']['output'] = ( + event['args']['output'][:MAX_OUTPUT_LENGTH] + '...' ) self.monologue.add_event(event) @@ -131,57 +132,58 @@ def _initialize(self, task: str): - task (str): The initial goal statement provided by the user Raises: - - ValueError: If task is not provided + - AgentNoInstructionError: If task is not provided """ if self._initialized: return - if task is None or task == "": - raise ValueError("Instruction must be provided") + if task is None or task == '': + raise AgentNoInstructionError() self.monologue = Monologue() self.memory = LongTermMemory() - output_type = "" + output_type = '' for thought in INITIAL_THOUGHTS: - thought = thought.replace("$TASK", task) - if output_type != "": - observation: Observation = NullObservation(content="") + thought = thought.replace('$TASK', task) + if output_type != '': + observation: Observation = NullObservation(content='') if output_type == ObservationType.RUN: observation = CmdOutputObservation( - content=thought, command_id=0, command="" + content=thought, command_id=0, command='' ) elif output_type == ObservationType.READ: - observation = FileReadObservation(content=thought, path="") + observation = FileReadObservation(content=thought, path='') elif output_type == ObservationType.RECALL: - observation = AgentRecallObservation(content=thought, memories=[]) + observation = AgentRecallObservation( + content=thought, memories=[]) elif output_type == ObservationType.BROWSE: observation = BrowserOutputObservation( - content=thought, url="", screenshot="" + content=thought, url='', screenshot='' ) self._add_event(observation.to_dict()) - output_type = "" + output_type = '' else: action: Action = NullAction() - if thought.startswith("RUN"): - command = thought.split("RUN ")[1] + if thought.startswith('RUN'): + command = thought.split('RUN ')[1] action = 
CmdRunAction(command) output_type = ActionType.RUN - elif thought.startswith("WRITE"): - parts = thought.split("WRITE ")[1].split(" > ") + elif thought.startswith('WRITE'): + parts = thought.split('WRITE ')[1].split(' > ') path = parts[1] content = parts[0] action = FileWriteAction(path=path, content=content) - elif thought.startswith("READ"): - path = thought.split("READ ")[1] + elif thought.startswith('READ'): + path = thought.split('READ ')[1] action = FileReadAction(path=path) output_type = ActionType.READ - elif thought.startswith("RECALL"): - query = thought.split("RECALL ")[1] + elif thought.startswith('RECALL'): + query = thought.split('RECALL ')[1] action = AgentRecallAction(query=query) output_type = ActionType.RECALL - elif thought.startswith("BROWSE"): - url = thought.split("BROWSE ")[1] + elif thought.startswith('BROWSE'): + url = thought.split('BROWSE ')[1] action = BrowseURLAction(url=url) output_type = ActionType.BROWSE else: @@ -211,9 +213,10 @@ def step(self, state: State) -> Action: self.monologue.get_thoughts(), state.background_commands_obs, ) - messages = [{"content": prompt, "role": "user"}] + messages = [{'content': prompt, 'role': 'user'}] resp = self.llm.completion(messages=messages) - action_resp = resp["choices"][0]["message"]["content"] + action_resp = resp['choices'][0]['message']['content'] + state.num_of_chars += len(prompt) + len(action_resp) action = prompts.parse_action_response(action_resp) self.latest_action = action return action diff --git a/agenthub/monologue_agent/utils/memory.py b/agenthub/monologue_agent/utils/memory.py index 791c3a3cd628..172e00555468 100644 --- a/agenthub/monologue_agent/utils/memory.py +++ b/agenthub/monologue_agent/utils/memory.py @@ -7,36 +7,37 @@ from opendevin import config from . import json -embedding_strategy = config.get("LLM_EMBEDDING_MODEL") +embedding_strategy = config.get('LLM_EMBEDDING_MODEL') # TODO: More embeddings: https://docs.llamaindex.ai/en/stable/examples/embeddings/OpenAI/ # There's probably a more programmatic way to do this. 
-if embedding_strategy == "llama2": +if embedding_strategy == 'llama2': from llama_index.embeddings.ollama import OllamaEmbedding embed_model = OllamaEmbedding( - model_name="llama2", - base_url=config.get_or_error("LLM_BASE_URL"), - ollama_additional_kwargs={"mirostat": 0}, + model_name='llama2', + base_url=config.get('LLM_BASE_URL', required=True), + ollama_additional_kwargs={'mirostat': 0}, ) -elif embedding_strategy == "openai": +elif embedding_strategy == 'openai': from llama_index.embeddings.openai import OpenAIEmbedding embed_model = OpenAIEmbedding( - model="text-embedding-ada-002", - api_key=config.get_or_error("LLM_API_KEY") + model='text-embedding-ada-002', + api_key=config.get('LLM_API_KEY', required=True) ) -elif embedding_strategy == "azureopenai": - from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding # Need to instruct to set these env variables in documentation +elif embedding_strategy == 'azureopenai': + # Need to instruct to set these env variables in documentation + from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding embed_model = AzureOpenAIEmbedding( - model="text-embedding-ada-002", - deployment_name=config.get_or_error("LLM_DEPLOYMENT_NAME"), - api_key=config.get_or_error("LLM_API_KEY"), - azure_endpoint=config.get_or_error("LLM_BASE_URL"), - api_version=config.get_or_error("LLM_API_VERSION"), + model='text-embedding-ada-002', + deployment_name=config.get('LLM_DEPLOYMENT_NAME', required=True), + api_key=config.get('LLM_API_KEY', required=True), + azure_endpoint=config.get('LLM_BASE_URL', required=True), + api_version=config.get('LLM_API_VERSION', required=True), ) else: from llama_index.embeddings.huggingface import HuggingFaceEmbedding embed_model = HuggingFaceEmbedding( - model_name="BAAI/bge-small-en-v1.5" + model_name='BAAI/bge-small-en-v1.5' ) @@ -51,9 +52,10 @@ def __init__(self): Initialize the chromadb and set up ChromaVectorStore for later use. 
""" db = chromadb.Client() - self.collection = db.get_or_create_collection(name="memories") + self.collection = db.get_or_create_collection(name='memories') vector_store = ChromaVectorStore(chroma_collection=self.collection) - self.index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model) + self.index = VectorStoreIndex.from_vector_store( + vector_store, embed_model=embed_model) self.thought_idx = 0 def add_event(self, event: dict): @@ -63,27 +65,27 @@ def add_event(self, event: dict): Parameters: - event (dict): The new event to be added to memory """ - id = "" - t = "" - if "action" in event: - t = "action" - id = event["action"] - elif "observation" in event: - t = "observation" - id = event["observation"] + id = '' + t = '' + if 'action' in event: + t = 'action' + id = event['action'] + elif 'observation' in event: + t = 'observation' + id = event['observation'] doc = Document( text=json.dumps(event), doc_id=str(self.thought_idx), extra_info={ - "type": t, - "id": id, - "idx": self.thought_idx, + 'type': t, + 'id': id, + 'idx': self.thought_idx, }, ) self.thought_idx += 1 self.index.insert(doc) - def search(self, query: str, k: int=10): + def search(self, query: str, k: int = 10): """ Searches through the current memory using VectorIndexRetriever @@ -100,5 +102,3 @@ def search(self, query: str, k: int=10): ) results = retriever.retrieve(query) return [r.get_text() for r in results] - - diff --git a/agenthub/monologue_agent/utils/monologue.py b/agenthub/monologue_agent/utils/monologue.py index 6f187789eb95..8dfb4ce6e061 100644 --- a/agenthub/monologue_agent/utils/monologue.py +++ b/agenthub/monologue_agent/utils/monologue.py @@ -1,5 +1,7 @@ import traceback + from opendevin.llm.llm import LLM +from opendevin.exceptions import AgentEventTypeError import agenthub.monologue_agent.utils.json as json import agenthub.monologue_agent.utils.prompts as prompts @@ -24,10 +26,10 @@ def add_event(self, t: dict): - t (dict): The thought that we want to add to memory Raises: - - ValueError: If t is not a dict + - AgentEventTypeError: If t is not a dict """ if not isinstance(t, dict): - raise ValueError('Event must be a dictionary') + raise AgentEventTypeError() self.thoughts.append(t) def get_thoughts(self): @@ -51,7 +53,7 @@ def get_total_length(self): try: total_length += len(json.dumps(t)) except TypeError as e: - print(f"Error serializing thought: {e}") + print(f'Error serializing thought: {e}') return total_length def condense(self, llm: LLM): @@ -73,4 +75,4 @@ def condense(self, llm: LLM): self.thoughts = prompts.parse_summary_response(summary_resp) except Exception as e: traceback.print_exc() - raise RuntimeError(f"Error condensing thoughts: {e}") + raise RuntimeError(f'Error condensing thoughts: {e}') diff --git a/agenthub/monologue_agent/utils/prompts.py b/agenthub/monologue_agent/utils/prompts.py index c58cb2dd9e4b..05f6bfaff4e7 100644 --- a/agenthub/monologue_agent/utils/prompts.py +++ b/agenthub/monologue_agent/utils/prompts.py @@ -1,6 +1,9 @@ from typing import List from . import json +from json import JSONDecodeError + +import re from opendevin.action import ( action_from_dict, @@ -9,10 +12,10 @@ from opendevin.observation import ( CmdOutputObservation, ) +from opendevin.exceptions import LLMOutputError ACTION_PROMPT = """ You're a thoughtful robot. Your main task is this: - %(task)s Don't expand the scope of your task--just complete it as written. 
@@ -91,17 +94,18 @@ def get_summarize_monologue_prompt(thoughts: List[dict]): """ Gets the prompt for summarizing the monologue - Returns: + Returns: - str: A formatted string with the current monologue within the prompt """ return MONOLOGUE_SUMMARY_PROMPT % { 'monologue': json.dumps({'old_monologue': thoughts}, indent=2), } + def get_request_action_prompt( - task: str, - thoughts: List[dict], - background_commands_obs: List[CmdOutputObservation] = [], + task: str, + thoughts: List[dict], + background_commands_obs: List[CmdOutputObservation] = [], ): """ Gets the action prompt formatted with appropriate values. @@ -118,22 +122,24 @@ def get_request_action_prompt( hint = '' if len(thoughts) > 0: latest_thought = thoughts[-1] - if "action" in latest_thought: - if latest_thought["action"] == 'think': - if latest_thought["args"]['thought'].startswith("OK so my task is"): + if 'action' in latest_thought: + if latest_thought['action'] == 'think': + if latest_thought['args']['thought'].startswith('OK so my task is'): hint = "You're just getting started! What should you do first?" else: hint = "You've been thinking a lot lately. Maybe it's time to take action?" - elif latest_thought["action"] == 'error': - hint = "Looks like that last command failed. Maybe you need to fix it, or try something else." + elif latest_thought['action'] == 'error': + hint = 'Looks like that last command failed. Maybe you need to fix it, or try something else.' - bg_commands_message = "" + bg_commands_message = '' if len(background_commands_obs) > 0: - bg_commands_message = "The following commands are running in the background:" + bg_commands_message = 'The following commands are running in the background:' for command_obs in background_commands_obs: - bg_commands_message += f"\n`{command_obs.command_id}`: {command_obs.command}" - bg_commands_message += "\nYou can end any process by sending a `kill` action with the numerical `id` above." - + bg_commands_message += ( + f'\n`{command_obs.command_id}`: {command_obs.command}' + ) + bg_commands_message += '\nYou can end any process by sending a `kill` action with the numerical `id` above.' + return ACTION_PROMPT % { 'task': task, 'monologue': json.dumps(thoughts, indent=2), @@ -141,6 +147,7 @@ def get_request_action_prompt( 'hint': hint, } + def parse_action_response(response: str) -> Action: """ Parses a string to find an action within it @@ -151,12 +158,28 @@ def parse_action_response(response: str) -> Action: Returns: - Action: The action that was found in the response string """ - action_dict = json.loads(response) + try: + action_dict = json.loads(response) + except JSONDecodeError: + # Find response-looking json in the output and use the more promising one. Helps with weak llms + response_json_matches = re.finditer( + r"""{\s*\"action\":\s?\"(\w+)\"(?:,?|,\s*\"args\":\s?{((?:.|\s)*?)})\s*}""", + response) # Find all response-looking strings + + def rank(match): + return len(match[2]) if match[1] == 'think' else 130 # Crudely rank multiple responses by length + try: + action_dict = json.loads(max(response_json_matches, key=rank)[0]) # Use the highest ranked response + except ValueError as e: + raise LLMOutputError( + "Output from the LLM isn't properly formatted. The model may be misconfigured." + ) from e if 'content' in action_dict: # The LLM gets confused here. 
Might as well be robust action_dict['contents'] = action_dict.pop('content') return action_from_dict(action_dict) + def parse_summary_response(response: str) -> List[dict]: """ Parses a summary of the monologue diff --git a/agenthub/planner_agent/__init__.py b/agenthub/planner_agent/__init__.py index c1a3f6e9238f..77bed3e68673 100644 --- a/agenthub/planner_agent/__init__.py +++ b/agenthub/planner_agent/__init__.py @@ -1,4 +1,4 @@ from opendevin.agent import Agent from .agent import PlannerAgent -Agent.register("PlannerAgent", PlannerAgent) +Agent.register('PlannerAgent', PlannerAgent) diff --git a/agenthub/planner_agent/agent.py b/agenthub/planner_agent/agent.py index e381d5ee320a..de0672ef5747 100644 --- a/agenthub/planner_agent/agent.py +++ b/agenthub/planner_agent/agent.py @@ -7,6 +7,7 @@ from opendevin.state import State from opendevin.action import Action + class PlannerAgent(Agent): """ The planner agent utilizes a special prompting strategy to create long term plans for solving problems. @@ -24,7 +25,7 @@ def __init__(self, llm: LLM): def step(self, state: State) -> Action: """ - Checks to see if current step is completed, returns AgentFinishAction if True. + Checks to see if current step is completed, returns AgentFinishAction if True. Otherwise, creates a plan prompt and sends to model for inference, returning the result as the next action. Parameters: @@ -38,12 +39,12 @@ def step(self, state: State) -> Action: if state.plan.task.state in ['completed', 'verified', 'abandoned']: return AgentFinishAction() prompt = get_prompt(state.plan, state.history) - messages = [{"content": prompt, "role": "user"}] + messages = [{'content': prompt, 'role': 'user'}] resp = self.llm.completion(messages=messages) action_resp = resp['choices'][0]['message']['content'] + state.num_of_chars += len(prompt) + len(action_resp) action = parse_response(action_resp) return action def search_memory(self, query: str) -> List[str]: return [] - diff --git a/agenthub/planner_agent/prompt.py b/agenthub/planner_agent/prompt.py index 1259a4197c20..a716d689909c 100644 --- a/agenthub/planner_agent/prompt.py +++ b/agenthub/planner_agent/prompt.py @@ -155,14 +155,14 @@ def get_prompt(plan: Plan, history: List[Tuple[Action, Observation]]) -> str: if not isinstance(observation, NullObservation): observation_dict = observation.to_dict() if ( - "extras" in observation_dict - and "screenshot" in observation_dict["extras"] + 'extras' in observation_dict + and 'screenshot' in observation_dict['extras'] ): - del observation_dict["extras"]["screenshot"] + del observation_dict['extras']['screenshot'] history_dicts.append(observation_dict) history_str = json.dumps(history_dicts, indent=2) - hint = "" + hint = '' current_task = plan.get_current_task() if current_task is not None: plan_status = f"You're currently working on this task:\n{current_task.goal}." @@ -172,39 +172,39 @@ def get_prompt(plan: Plan, history: List[Tuple[Action, Observation]]) -> str: plan_status = "You're not currently working on any tasks. Your next action MUST be to mark a task as in_progress." hint = plan_status - latest_action_id = latest_action.to_dict()["action"] + latest_action_id = latest_action.to_dict()['action'] if current_task is not None: - if latest_action_id == "": + if latest_action_id == '': hint = "You haven't taken any actions yet. Start by using `ls` to check out what files you're working with." 
elif latest_action_id == ActionType.RUN: - hint = "You should think about the command you just ran, what output it gave, and how that affects your plan." + hint = 'You should think about the command you just ran, what output it gave, and how that affects your plan.' elif latest_action_id == ActionType.READ: - hint = "You should think about the file you just read, what you learned from it, and how that affects your plan." + hint = 'You should think about the file you just read, what you learned from it, and how that affects your plan.' elif latest_action_id == ActionType.WRITE: - hint = "You just changed a file. You should think about how it affects your plan." + hint = 'You just changed a file. You should think about how it affects your plan.' elif latest_action_id == ActionType.BROWSE: - hint = "You should think about the page you just visited, and what you learned from it." + hint = 'You should think about the page you just visited, and what you learned from it.' elif latest_action_id == ActionType.THINK: hint = "Look at your last thought in the history above. What does it suggest? Don't think anymore--take action." elif latest_action_id == ActionType.RECALL: - hint = "You should think about the information you just recalled, and how it should affect your plan." + hint = 'You should think about the information you just recalled, and how it should affect your plan.' elif latest_action_id == ActionType.ADD_TASK: - hint = "You should think about the next action to take." + hint = 'You should think about the next action to take.' elif latest_action_id == ActionType.MODIFY_TASK: - hint = "You should think about the next action to take." + hint = 'You should think about the next action to take.' elif latest_action_id == ActionType.SUMMARIZE: - hint = "" + hint = '' elif latest_action_id == ActionType.FINISH: - hint = "" + hint = '' - print_with_color("HINT:\n" + hint, "INFO") + print_with_color('HINT:\n' + hint, 'INFO') return prompt % { - "task": plan.main_goal, - "plan": plan_str, - "history": history_str, - "hint": hint, - "plan_status": plan_status, + 'task': plan.main_goal, + 'plan': plan_str, + 'history': history_str, + 'hint': hint, + 'plan_status': plan_status, } @@ -218,12 +218,12 @@ def parse_response(response: str) -> Action: Returns: - Action: A valid next action to perform from model output """ - json_start = response.find("{") - json_end = response.rfind("}") + 1 + json_start = response.find('{') + json_end = response.rfind('}') + 1 response = response[json_start:json_end] action_dict = json.loads(response) - if "contents" in action_dict: + if 'contents' in action_dict: # The LLM gets confused here. 
Might as well be robust - action_dict["content"] = action_dict.pop("contents") + action_dict['content'] = action_dict.pop('contents') action = action_from_dict(action_dict) return action diff --git a/docker-compose.yml b/docker-compose.yml index 342b88da4085..e3403befff2e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,19 +3,21 @@ version: "3.8" services: # Devin Out-of-The-Box Agent Service devin: - container_name: devin + container_name: devin_backend hostname: devin - image: lehcode/opendevin-cuda${CUDA_VERSION:?}-miniconda:dev + image: lehcode/opendevin_api-cuda${CUDA_VERSION:?}-miniconda:dev pull_policy: always + # Override the default models and rebuild image with restart container then + # These arguments will override defaults + # command: "[-m ] [-e ]" build: dockerfile: docker/devin/app/Dockerfile args: - ws_port: ${DEVIN_WS_PORT:?} - jupyter_port: ${JUPYTER_PORT:?} + # jupyter_port: ${JUPYTER_PORT:?} debug: ${DEBUG:-} conda_dir: /var/miniconda nvidia_utils_driver: 550 - app_dir: ${APP_DIR:?} + app_root: ${APP_ROOT:?} venv_name: ${VENV_NAME:?} locale: ${LANG:?} timezone: ${TZ:?} @@ -33,19 +35,18 @@ services: volumes: - root_dir_vol:/root - pip_cache_vol:/root/.cache/pip - - conda_vol:${CONDA_PREFIX:?} -# - ./requirements.txt:${APP_DIR:?}/requirements.txt - - ./environment.yml:${APP_DIR:?}/environment.yml + - conda_vol:${CONDA_ROOT:?} + - ./environment.yml:${APP_ROOT:?}/environment.yml - ./workspace:${WORKSPACE_DIR:?} - - ./docker/devin/app/.condarc:${CONDA_PREFIX:?}/.condarc - - ./Makefile:${APP_DIR:?}/Makefile - - ./pyproject.toml:${APP_DIR:?}/pyproject.toml - - ./config.toml.template:${APP_DIR:?}/config.toml.template - - ./.env:${APP_DIR:?}/.env - - ./opendevin:${APP_DIR:?}/opendevin - - ./agenthub:${APP_DIR:?}/agenthub - - ./dev_config:${APP_DIR:?}/dev_config - - ./tests:${APP_DIR:?}/tests +# - ./docker/devin/app/.condarc:${CONDA_ROOT:?}/.condarc +# - ./Makefile:${APP_ROOT:?}/Makefile +# - ./pyproject.toml:${APP_ROOT:?}/pyproject.toml +# - ./config.toml.dist:${APP_ROOT:?}/config.toml +# - ./.env:${APP_ROOT:?}/.env +# - ./opendevin:${APP_ROOT:?}/opendevin +# - ./agenthub:${APP_ROOT:?}/agenthub +# - ./dev_config:${APP_ROOT:?}/dev_config +# - ./tests:${APP_ROOT:?}/tests tmpfs: - /run - /tmp @@ -53,8 +54,9 @@ services: ports: - "${DEVIN_WS_PORT}:${DEVIN_WS_PORT}" - "${JUPYTER_PORT:?}:${JUPYTER_PORT:?}" + # VSCode Python remote debugger + - "5678:5678" tty: true - stdin_open: true restart: no entrypoint: /docker-entrypoint.sh deploy: @@ -74,10 +76,10 @@ services: litellm: image: ghcr.io/berriai/litellm:main-latest pull_policy: always - container_name: litellm + container_name: litellm_proxy command: "--config /etc/config.yaml --port ${LITELLM_PORT} --num_workers 8 --detailed_debug" environment: - LITELLM_DEFAULT_MODEL: ${LITELLM_DEFAULT_MODEL:?} + DEFAULT_CHAT_MODEL: ${DEFAULT_CHAT_MODEL:?} DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:${POSTGRES_CONTAINER_PORT}/${POSTGRES_DB} ports: - "${LITELLM_PORT}:4000" @@ -105,7 +107,7 @@ services: redis: image: redis/redis-stack:latest pull_policy: always - container_name: redis-stack + container_name: redis_stack env_file: - .env - docker/redis/redis.env @@ -120,7 +122,7 @@ services: postgres: image: postgres:latest pull_policy: always - container_name: db-postgres + container_name: db_postgres env_file: - .env - docker/postgres/postgres.env @@ -135,12 +137,12 @@ services: - opendevin-net # UI service - ui: - container_name: devin-ui + web_ui: + container_name: devin_ui image: 
lehcode/opendevin_ui-node${NODE_VERSION:?}-npm${NPM_VERSION}-pnpm-reactjs:dev pull_policy: always build: - dockerfile: docker/devin/ui/Dockerfile + dockerfile: docker/devin/web_ui/Dockerfile args: node_version: ${NODE_VERSION:?} npm_version: ${NPM_VERSION:?} @@ -149,7 +151,7 @@ services: node_options: ${NODE_OPTIONS} env_file: - ./.env - - docker/devin/ui/.env + - docker/devin/web_ui/.env environment: BACKEND_HOST: ${DEVIN_HOST:?}:${DEVIN_API_PORT:?} FRONTEND_PORT: ${UI_HTTP_PORT:?} @@ -160,8 +162,8 @@ services: ports: - "${UI_HTTP_PORT:?}:${UI_HTTP_PORT:?}" - "${UI_HTTPS_PORT:?}:${UI_HTTPS_PORT:?}" - tty: false - entrypoint: "/docker-entrypoint.sh" + tty: true + command: "" networks: opendevin-net: diff --git a/docker/devin/app/.condarc b/docker/devin/app/.condarc index e69de29bb2d1..e14f08871c5b 100644 --- a/docker/devin/app/.condarc +++ b/docker/devin/app/.condarc @@ -0,0 +1,8 @@ +channel_priority: disabled +channels: + - microsoft + - pytorch + - anaconda + - conda-forge + - conda + - defaults diff --git a/docker/devin/app/Dockerfile b/docker/devin/app/Dockerfile index 2692450119b7..9c0cd995a353 100644 --- a/docker/devin/app/Dockerfile +++ b/docker/devin/app/Dockerfile @@ -3,13 +3,12 @@ ARG ubuntu_tag=ubuntu20.04 ARG tag="${cuda_version}-devel-${ubuntu_tag}" FROM nvidia/cuda:${tag} as build-app ARG cuda_version -ARG ubuntu_version +ARG ubuntu_version=Ubuntu-20.04 LABEL org.opencontainers.image.description="Devin with Nvidia CUDA v${cuda_version} and Miniconda3" LABEL org.opencontainers.image.author="lehcode <53556648+lehcode@users.noreply.github.com>" ARG debug -#ARG pip_cache_dir=/root/.cache/pip ARG apt_cache_dir=/var/cache/apt ARG nvidia_utils_driver=550 @@ -17,15 +16,18 @@ ARG nvidia_utils_driver=550 ENV DEBUG="$debug" ENV DEBIAN_FRONTEND=noninteractive +COPY docker/locales /etc/locale.gen + ADD --checksum=sha256:b978856ec3c826eb495b60e3fffe621f670c101150ebcbdeede4f961f22dc438 https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh /tmp/miniconda.sh RUN --mount=type=cache,target=${apt_cache_dir},sharing=locked \ if [ -n "${DEBUG}" ]; then set -eux; fi && \ - apt-get -q update && \ + echo "Updating $ubuntu_version packages..." && \ + apt-get -q update > /dev/null && \ apt-get install -qy --no-install-recommends \ tzdata locales cpanminus curl git gnupg gnupg2 gnupg1 wget \ - nvidia-utils-${nvidia_utils_driver} && \ - if [ -z "${DEBUG}" ]; then apt-get -qy upgrade; fi + nvidia-utils-${nvidia_utils_driver} > /dev/null && \ + if [ -z "${DEBUG}" ]; then apt-get -qy upgrade > /dev/null; fi ARG timezone=Etc/UTC ARG locale @@ -33,14 +35,13 @@ ARG locale ENV LANG="$locale" ENV TZ="$timezone" -COPY docker/locales /etc/locale.gen - -RUN if [ -n "$debug" ]; then set -eux; fi && \ - ln -fs "/usr/share/zoneinfo/$timezone" /etc/localtime && \ - echo "$TZ" > /etc/timezone && \ - dpkg-reconfigure -f noninteractive tzdata locales && \ - apt-get -q update && \ - apt-get -qy upgrade && \ +RUN --mount=type=cache,target=/usr/share/i18n/locales \ + if [ -n "$debug" ]; then set -eux; fi && \ + echo "Configuring timezone and $ubuntu_version locale..." 
&& \ + ln -fs "/usr/share/zoneinfo/$timezone" /etc/localtime > /dev/null && \ + echo "$TZ" | tee -pa /etc/timezone > /dev/null && \ + dpkg-reconfigure -f noninteractive tzdata locales > /dev/null && \ + apt-get -q update && apt-get -qy upgrade && \ rm -f /usr/lib/x86_64-linux-gnu/libcudadebugger.so.1 && \ rm -f /usr/lib/x86_64-linux-gnu/libnvidia-ml.so.1 && \ rm -f /usr/lib/x86_64-linux-gnu/libcuda.so.1 && \ @@ -48,50 +49,42 @@ RUN if [ -n "$debug" ]; then set -eux; fi && \ COPY docker/devin/app/conda.base.tmpl.yml /tmp/environment.yml -ARG conda_dir +ARG conda_root=/var/miniconda3 ARG venv_name -ARG app_dir +ARG app_root -ENV CONDA_PREFIX=$conda_dir +ENV CONDA_ROOT=$conda_root ENV VENV_NAME="$venv_name" -ENV APP_DIR="$app_dir" - - -ARG app_dir - -WORKDIR "$app_dir" +ENV APP_ROOT="$app_root" RUN if [ -n "${DEBUG}" ]; then set -eux; fi && \ - mkdir -p "${CONDA_PREFIX}" && \ - bash /tmp/miniconda.sh -b -u -p ${CONDA_PREFIX} && \ - sed -i "s//$(echo "${CONDA_PREFIX}/envs/${VENV_NAME}" | sed -e 's/[\/&]/\\&/g')/g" /tmp/environment.yml && \ - sed -i "s//${VENV_NAME}/g" /tmp/environment.yml + echo "Installing Miniconda..." && \ + mkdir -p "${CONDA_ROOT}" && \ + bash /tmp/miniconda.sh -b -u -p ${CONDA_ROOT} > /dev/null && \ + sed -i "s//$(echo "${CONDA_ROOT}/envs/${VENV_NAME}" | sed -e 's/[\/&]/\\&/g')/g" /tmp/environment.yml > /dev/null && \ + sed -i "s//${VENV_NAME}/g" /tmp/environment.yml > /dev/null -ENV PATH="/root/.local/bin:${CONDA_PREFIX}/bin:${PATH}" -ARG conda_pkgs_dir=$CONDA_PREFIX/pkgs +ENV PATH="/root/.local/bin:${CONDA_ROOT}/bin:${PATH}" +ARG conda_pkgs_dir=$CONDA_ROOT/pkgs -WORKDIR $CONDA_PREFIX +COPY docker/devin/app/.condarc "${CONDA_ROOT}/.condarc" RUN --mount=type=cache,target=${conda_pkgs_dir},sharing=locked \ if [ -n "${DEBUG}" ]; then set -eux; fi && \ - conda config --add channels conda && \ - conda config --prepend channels conda-forge && \ - conda config --set channel_priority disabled && \ - conda install -qy pip && \ - conda init -q bash && \ - conda env create -y -f /tmp/environment.yml -n "${VENV_NAME}" 2>&1 > /dev/null + conda install -qy pip > /dev/null && \ + conda init bash > /dev/null && \ + echo "Configuring ${VENV_NAME} environment..." && \ + conda env create -qy -f /tmp/environment.yml -n "${VENV_NAME}" > /dev/null RUN --mount=type=cache,target=${conda_pkgs_dir},sharing=locked \ if [ -n "${DEBUG}" ]; then set -eux; fi && \ - conda install -q -y -n "${VENV_NAME}" pip + conda install -qy -n "${VENV_NAME}" pip > /dev/null ARG bin_dir=/usr/local/bin -ENV PYTHONPATH="$app_dir" +ENV PYTHONPATH="${APP_ROOT}" ENV BIN_DIR="$bin_dir" -WORKDIR "$app_dir" - # Activate Miniconda environment RUN eval "$(conda shell.bash activate "${VENV_NAME}")" # Make RUN commands use the new environment @@ -99,27 +92,33 @@ SHELL ["conda", "run", "-n", "od_env", "/bin/bash", "-c"] RUN --mount=type=cache,target=${conda_pkgs_dir},sharing=locked \ if [ -n "$debug" ]; then set -eux; fi && \ - conda config --add channels anaconda && \ - conda config --add channels conda-forge && \ - conda config --set channel_priority strict && \ - conda install -y uvicorn chromadb jupyter && \ - conda config --add channels pytorch && \ - conda install -y pytorch::pytorch && \ - conda config --add channels microsoft && \ - conda install -y microsoft::playwright && \ - conda config --set channel_priority disabled + echo "Setting up Miniconda..." 
&& \ + conda config -q --add channels anaconda > /dev/null && \ + conda config -q --add channels pytorch > /dev/null && \ + conda config -q --add channels microsoft > /dev/null && \ + conda config -q --set channel_priority disabled + +WORKDIR "$APP_ROOT" COPY .env . -COPY config.toml.template . -COPY requirements.txt . COPY pyproject.toml . - -RUN --mount=type=cache,target=${conda_pkgs_dir},sharing=locked \ +COPY README.md . + +ENV POETRY_HOME=/etc/poetry +ENV PATH="${POETRY_HOME}/bin:${PATH}" +ENV POETRY_CACHE_DIR=/root/.cache/pypoetry +# If you do not want to install the current project use --no-root. +# If you want to use Poetry only for dependency management but not for packaging, you can disable package mode by +# setting package-mode = false in your pyproject.toml file. +# In a future version of Poetry this warning will become an error! +RUN --mount=type=cache,target=${POETRY_CACHE_DIR} \ if [ -n "$debug" ]; then set -eux; fi && \ - pip install -r requirements.txt + curl -sSL "https://install.python-poetry.org" | python3 - > /dev/null && poetry --version && \ + echo "Building OpenDevin..." && \ + poetry install --no-root --no-plugins > /dev/null -ARG litellm_port=11111 -ARG jupyter_port=37799 +#ARG litellm_port=11111 +#ARG jupyter_port=37799 ARG workspace_dir COPY agenthub agenthub @@ -130,17 +129,32 @@ COPY tests tests RUN --mount=type=cache,target=${conda_pkgs_dir},sharing=locked \ if [ -n "$debug" ]; then set -eux; fi && \ - conda env export -n "${VENV_NAME}" > environment.yml + echo "Installing Jupyter Notebook..." && \ + if [ -n "$debug" ]; then set -eux; fi && \ + conda install -qy jupyter > /dev/null -RUN if [ -n "$debug" ]; then set -eux; fi && \ - echo "Conda environments info:" && \ - conda info --envs && \ - echo "PYTHONPATH variable:" $(env | grep PYTHONPATH) +RUN --mount=type=cache,target=${conda_pkgs_dir},sharing=locked \ + if [ -n "$debug" ]; then set -eux; fi && \ + echo "Dumping ${VENV_NAME} environment config..." && \ + if [ -n "$debug" ]; then set -eux; fi && \ + conda env export -q -n "${VENV_NAME}" -f environment.yml -COPY docker/devin/app/entrypoint.sh /docker-entrypoint.sh -COPY docker/env_debug.sh "$bin_dir/env_debug" +COPY dev_config/python/.pre-commit-config.yaml /tmp/ +COPY .git .git +COPY .gitignore . +COPY .gitattributes . -COPY docker/devin/app/configure.py "${APP_DIR}/configure_devin" -COPY docker/devin/app/devin_up.py "${APP_DIR}/devin_up" +COPY docker/devin/app/devin_up.py opendevin_launcher +COPY config.toml.template config.toml +COPY docker/env_debug.sh "${APP_ROOT}/run/env_debug" +COPY docker/devin/app/entrypoint.sh /docker-entrypoint.sh -CMD ["-m", "mixtral", "-e", "llama2"] +RUN if [ -n "$debug" ]; then set -eux; fi && \ + echo "Finalizing build..." 
&& \ + git config --unset-all core.hooksPath || true > /dev/null && \ + poetry run pre-commit install --config /tmp/.pre-commit-config.yaml && \ + chmod a+x /docker-entrypoint.sh && \ + rm -rf /var/lib/apt/lists/* + +ENTRYPOINT ["/docker-entrypoint.sh", "-c"] +CMD "-m ${DEFAULT_CHAT_MODEL} -e ${DEFAULT_EMBEDDINGS_MODEL} --" diff --git a/docker/devin/app/configure.py b/docker/devin/app/configure.py deleted file mode 100644 index d65a09c27f84..000000000000 --- a/docker/devin/app/configure.py +++ /dev/null @@ -1,20 +0,0 @@ -import sys - -def main(): - # LLM model name - llm_model = sys.argv[1] - # Embedding model - embedding_model = sys.argv[2] - - # Process the input - configure_devin(user_input) - - # Display the processed input - print("DONE configuring OpenDevon") - -def configure_devin(model: string, embedding_model: string): - # Process the input here, for example, you can just return it as is - return input_string - -if __name__ == "__main__": - main() diff --git a/docker/devin/app/devin_up.py b/docker/devin/app/devin_up.py index 968439caa4a9..d3059fa596a4 100644 --- a/docker/devin/app/devin_up.py +++ b/docker/devin/app/devin_up.py @@ -4,8 +4,9 @@ def parse_arguments(): parser = argparse.ArgumentParser(description="Run Uvicorn server for OpenDevin app.") + parser.add_argument("--llm-model", type=str, default="mistral:7b", help="Default chat/instruct model") + parser.add_argument("--embeddings-model", type=str, default="llama2", help="Default embeddings model") parser.add_argument("--port", type=int, default=4173, help="Port for the server") - parser.add_argument("--host", type=str, default="0.0.0.0", help="Host for the server") parser.add_argument("--reload", action="store_true", help="Enable auto-reloading of the server") parser.add_argument("--log-level", type=str, default="info", choices=["critical", "error", "warning", "info", "debug"], help="Log level for the server") return parser.parse_args() @@ -16,7 +17,7 @@ def parse_arguments(): # Run Uvicorn server uvicorn.run("opendevin.server.listen:app", - host=args.host, + host="0.0.0.0", port=args.port, - reload=args.reload, + reload=True, log_level=args.log_level) diff --git a/docker/devin/app/entrypoint.sh b/docker/devin/app/entrypoint.sh old mode 100755 new mode 100644 index 79c14f10cf90..81da4cd3c3d5 --- a/docker/devin/app/entrypoint.sh +++ b/docker/devin/app/entrypoint.sh @@ -3,29 +3,30 @@ eval "$(conda shell.bash activate "${VENV_NAME}")" if [ -n "${DEBUG}" ]; then - echo "Python executable in ${VENV_NAME}': $(which python3) v$(python3 --version)" - - echo "Conda environments info:" + printf "******\n* System information: \n******" + /bin/bash < "${APP_ROOT}/run/env_debug" + echo "Python executable in ${VENV_NAME}: $(which python3) v$(python3 --version)" + echo "Anaconda environments info:" conda info --envs - - env | grep PYTHONPATH - + echo "Anaconda packages sources: $(conda config --show-sources)" + echo "PYTHONPATH: $(env | grep PYTHONPATH)" + env | grep LITELLM_PORT + env | grep JUPYTER_PORT echo "Nvidia CUDA properties:" nvidia-smi -# pwd - bash $BIN_DIR/env_debug + bash "$BIN_DIR/env_debug.sh" fi set -eux # Start API server if [ -n "${DEBUG}" ]; then - python3 "${APP_DIR}/devin_up" \ - --port "${DEVIN_API_PORT}" --host "${DEVIN_HOST}" \ - --log-level critical + python3 opendevin_launcher --port "${DEVIN_API_PORT}" --reload --log-level info \ + --llm-model mistral:7b \ + --embeddings-model llama2 else - python3 "${APP_DIR}/devin_up" \ - --port "${DEVIN_API_PORT}" --host "${DEVIN_HOST}" \ - --reload --log-level info + 
python3 opendevin_launcher --port "${DEVIN_API_PORT}" --reload --log-level critical \ + --llm-model mistral:7b \ + --embeddings-model llama2 fi diff --git a/docker/devin/ui/entrypoint.sh b/docker/devin/ui/entrypoint.sh deleted file mode 100755 index 91ee6e935380..000000000000 --- a/docker/devin/ui/entrypoint.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash --login - -if [ -n "${DEBUG}" ]; then set -eux; fi - -PATH="${PATH}:$yarn_global_root/node_modules/npm/bin:$yarn_global_root/bin" - -# pwd - -# echo ${PATH} - -source ${BIN_DIR/env_debug} | bash - -# yarn install - -ls -al . | grep node_modules - - -if [ -n "${DEBUG}" ]; then - vite --config vite.config.js --host 0.0.0.0 --port "${UI_HTTP_PORT:?}" \ - --clearScreen false --debug True -else - vite --config vite.config.js --host 0.0.0.0 --port "${UI_HTTP_PORT:?}" -fi diff --git a/docker/devin/ui/.env.dist b/docker/devin/web_ui/.env.dist similarity index 100% rename from docker/devin/ui/.env.dist rename to docker/devin/web_ui/.env.dist diff --git a/docker/devin/ui/Dockerfile b/docker/devin/web_ui/Dockerfile similarity index 69% rename from docker/devin/ui/Dockerfile rename to docker/devin/web_ui/Dockerfile index ceef76c08c37..7858313b3240 100644 --- a/docker/devin/ui/Dockerfile +++ b/docker/devin/web_ui/Dockerfile @@ -12,12 +12,13 @@ COPY docker/openssl.cnf /etc/ssl/openssl.cnf RUN if [ -n "$debug" ]; then set -eux; fi && \ apk update && if [ -z "$debug" ]; then apk upgrade; fi && \ - apk --no-cache add openssl ca-certificates && \ + apk --no-cache add git openssl ca-certificates && \ mkdir -p /root/devin/.ssl && \ openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ - -keyout /root/devin/.ssl/privkey.pem \ - -out /root/devin/.ssl/fullchain.pem \ - -config /etc/ssl/openssl.cnf + -keyout /root/devin/.ssl/server_privkey.pem \ + -out /root/devin/.ssl/server_fullchain.pem \ + -config /etc/ssl/openssl.cnf && \ + rm -rf /var/cache/apk/* ARG node_env ARG node_options @@ -52,18 +53,22 @@ COPY frontend/public ./public COPY frontend/scripts ./scripts COPY .env . 
- RUN --mount=type=cache,target=$pm_cache_dir \ if [ -n "$debug" ]; then set -eux; fi && \ yarn install && tsc +RUN rm -rf "$pm_cache_dir"/* + +FROM node:${node_version}-alpine as serve -FROM node:${node_version}-slim as serve +RUN if [ -n "$debug" ]; then set -eux; fi && \ + apk update && if [ -z "$debug" ]; then apk upgrade; fi && \ + apk --no-cache add git ARG debug ARG node_env ARG node_options -ARG app_dir=/opt/opendevin/ui +ARG app_root=/opt/opendevin/ui ENV DEBUG=$debug ENV NODE_OPTIONS="$node_options" @@ -83,26 +88,36 @@ COPY --from=builder $build_dir/package.json $build_dir/package.json COPY --from=builder $build_dir/.npmrc $build_dir/.npmrc COPY --from=builder $build_dir/vite.config.js $build_dir/vite.config.js +COPY --from=builder /etc/ssl/openssl.cnf /etc/ssl/openssl.cnf +COPY --from=builder /root/devin/.ssl/server_privkey.pem /root/devin/.ssl/privkey.pem +COPY --from=builder /root/devin/.ssl/server_fullchain.pem /root/devin/.ssl/fullchain.pem + RUN --mount=type=cache,target=$pm_cache_dir \ if [ -n "$debug" ]; then set -eux; fi && \ npm config set prefix "${yarn_global_root}" && \ npm config set audit false && \ npm config set fund false && \ - yarn global add \ - vite esbuild nx@latest @nx/react + yarn global add vite esbuild nx@latest @nx/react RUN --mount=type=cache,target=$pm_cache_dir \ if [ -n "$debug" ]; then set -eux; fi && \ yarn add -P classnames webpack typescript -#RUN ls -al && exit 1 +COPY frontend/vite.config.js $app_root/vite.config.js + +RUN --mount=type=cache,target=$build_dir/dist \ + echo "Finalizing build..." && \ + if [ -n "$debug" ]; then set -eux; fi && \ + rm -rf /var/lib/apt/lists/* && \ + npm cache clean --force && \ + yarn cache clean + +COPY docker/devin/web_ui/entrypoint.sh /docker-entrypoint.sh RUN --mount=type=cache,target=$build_dir/dist \ if [ -n "$debug" ]; then set -eux; fi && \ + chmod a+x /docker-entrypoint.sh && \ vite build --config vite.config.js --clearScreen false -COPY docker/devin/ui/entrypoint.sh /docker-entrypoint.sh -COPY frontend/vite.config.js $app_dir/vite.config.js - -ENTRYPOINT ["/docker-entrypoint.sh", "--"] -CMD ["-m", "mistral:7b", "-e", "llama2"] +ENTRYPOINT ["/bin/sh", "-c", "/docker-entrypoint.sh"] +CMD "-m ${DEFAULT_CHAT_MODEL} -e ${DEFAULT_EMBEDDINGS_MODEL} --" diff --git a/docker/devin/web_ui/entrypoint.sh b/docker/devin/web_ui/entrypoint.sh new file mode 100755 index 000000000000..a5ccca592349 --- /dev/null +++ b/docker/devin/web_ui/entrypoint.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +# Configure defaults +# Run the frontend dev server + +if [ -n "${DEBUG}" ]; then set -eux; fi + +echo "Backend endpoint address http://${DEVIN_HOST}:${DEVIN_API_PORT}" + +if [ -n "${SECURE_MODE}" ]; then + export UI_PORT="${UI_HTTPS_PORT}" +else + export UI_PORT="${UI_HTTP_PORT}" +fi + +if [ -n "${SECURE_MODE}" ]; then + echo "Starting frontend server on https://0.0.0.0:${UI_PORT}" + vite --config vite.config.js --host 0.0.0.0 --port "${UI_PORT}" +else + echo "Starting frontend server on http://0.0.0.0:${UI_PORT}" + vite --config vite.config.js --host 0.0.0.0 --port "${UI_PORT}" \ + --clearScreen false +fi diff --git a/docker/env_debug.sh b/docker/env_debug.sh index 5a2d3fa807a2..5d418271ebf1 100644 --- a/docker/env_debug.sh +++ b/docker/env_debug.sh @@ -1,15 +1,3 @@ #!/bin/bash echo "Container hostname: $(hostname)" echo "Container IP: $(hostname -i)" -echo "Environment variables:" -env | grep NVIDIA -echo "Python environment and executables status:" -echo "Default '$(which python3)'" python3 --version -echo "PIP executable '$(which pip)' pip --version" 
-echo "Python executable in ${VENV_NAME}: $(conda run -n ${VENV_NAME} python3 --version)" -echo "Python executable in ${VENV_NAME}: $(conda run -n ${VENV_NAME} pip --version)" -echo "Conda environments info:" -conda info --envs -echo "Nvidia CUDA properties:" -nvidia-smi -# echo "Force-loading the default Ollama model:" diff --git a/docker/nginx/nginx.conf b/docker/nginx/nginx.conf index 84062ef36d00..55756ee1b73e 100644 --- a/docker/nginx/nginx.conf +++ b/docker/nginx/nginx.conf @@ -5,7 +5,7 @@ http { listen 8888; location / { - proxy_pass http://ui:4173; + proxy_pass http://web_ui:4173; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; diff --git a/docker/python_debug.sh b/docker/python_debug.sh new file mode 100644 index 000000000000..4232c6d78994 --- /dev/null +++ b/docker/python_debug.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +echo "Miniconda environment info:" +echo "Python executable in ${VENV_NAME}: $(conda run -n ${VENV_NAME} python3 --version)" +echo "Python executable in ${VENV_NAME}: $(conda run -n ${VENV_NAME} pip --version)" +echo "Conda environments info:" +conda info --envs + +echo "Python3 info:" +echo "Default python3 executable: '$(which python3)'" --version +echo "PIP executable '$(which pip)' pip --version" + +echo "Python environment variables:" +env | grep PYTHON \ No newline at end of file diff --git a/docs/documentation/AZURE_LLM_GUIDE.md b/docs/documentation/AZURE_LLM_GUIDE.md new file mode 100644 index 000000000000..e762b9ed4d88 --- /dev/null +++ b/docs/documentation/AZURE_LLM_GUIDE.md @@ -0,0 +1,42 @@ +# Azure OpenAI LLM Guide + +# 1. Completion + +OpenDevin uses LiteLLM for completion calls. You can find their documentation on Azure [here](https://docs.litellm.ai/docs/providers/azure) + +## azure openai configs + +During installation of OpenDevin, you can set up the following parameters: +``` +LLM_BASE_URL="" # e.g. "https://openai-gpt-4-test-v-1.openai.azure.com/" +LLM_API_KEY="" +LLM_MODEL="azure/" +``` + +They will be saved in the `config.toml` file in the `OpenDevin` directory. You can add or edit them manually in the file after installation. + +In addition, you need to set the following environment variable, which is used by the LiteLLM library to make requests to the Azure API: + +`AZURE_API_VERSION = "" # e.g. "2024-02-15-preview"` + +You can set the environment variable in your terminal or in an `.env` file in the `OpenDevin` directory. + +Alternatively, you can add all these in .env, however in that case make sure to check the LiteLLM documentation for the correct variables. + +# 2. Embeddings + +OpenDevin uses llama-index for embeddings. You can find their documentation on Azure [here](https://docs.llamaindex.ai/en/stable/api_reference/embeddings/azure_openai/) + +## azure openai configs + +The model used for Azure OpenAI embeddings is "text-embedding-ada-002". You need the correct deployment name for this model in your Azure account. + +During installation of OpenDevin, you can set the following parameters used for embeddings, when prompted by the makefile: + +``` +LLM_EMBEDDING_MODEL="azureopenai" +DEPLOYMENT_NAME = "" # e.g. "TextEmbedding..." +LLM_API_VERSION = "" # e.g. "2024-02-15-preview" +``` + +You can re-run ```make setup-config``` anytime, or add or edit them manually in the file afterwards. 
diff --git a/docs/documentation/LOCAL_LLM_GUIDE.md b/docs/documentation/LOCAL_LLM_GUIDE.md index ba1377d8b795..14b981882183 100644 --- a/docs/documentation/LOCAL_LLM_GUIDE.md +++ b/docs/documentation/LOCAL_LLM_GUIDE.md @@ -109,3 +109,29 @@ At this point everything should be set up and working properly. - In the first terminal `make start-backend` - In the second terminal `make start-frontend` 5. you should now be able to connect to `http://localhost:3001/` with your local model running! + + +## Additional Notes for WSL2 Users: + +1. If you encounter the following error during setup: `Exception: Failed to create opendevin user in sandbox: b'useradd: UID 0 is not unique\n'` +You can resolve it by running: + ``` + export SANDBOX_USER_ID=1000 + ``` + +2. If you face issues running Poetry even after installing it during the build process, you may need to add its binary path to your environment: + ``` + export PATH="$HOME/.local/bin:$PATH" + ``` + +3. If you experience issues related to networking, such as `NoneType object has no attribute 'request'` when executing `make run`, you may need to configure your WSL2 networking settings. Follow these steps: - Open or create the `.wslconfig` file located at `C:\Users\%username%\.wslconfig` on your Windows host machine. - Add the following configuration to the `.wslconfig` file: + ``` + [wsl2] + networkingMode=mirrored + localhostForwarding=true + ``` + - Save the `.wslconfig` file. + - Restart WSL2 completely by exiting any running WSL2 instances and executing the command `wsl --shutdown` in your command prompt or terminal. + - After restarting WSL, attempt to execute `make run` again. The networking issue should be resolved. \ No newline at end of file diff --git a/frontend/.eslintrc b/frontend/.eslintrc index 1ba38641372b..cd388fe9510f 100644 --- a/frontend/.eslintrc +++ b/frontend/.eslintrc @@ -32,6 +32,8 @@ "acc", "state" ] }], + "import/no-extraneous-dependencies": "off", + "@typescript-eslint/no-unused-vars": "warn", // For https://stackoverflow.com/questions/55844608/stuck-with-eslint-error-i-e-separately-loops-should-be-avoided-in-favor-of-arra "no-restricted-syntax": "off", "import/prefer-default-export": "off", diff --git a/frontend/src/App.test.tsx b/frontend/src/App.test.tsx index 2f52a5031a49..d76787ed69b4 100644 --- a/frontend/src/App.test.tsx +++ b/frontend/src/App.test.tsx @@ -5,6 +5,5 @@ import App from "./App"; test("renders learn react link", () => { render(); const linkElement = screen.getByText(/learn react/i); - // @ts-expect-error expect(linkElement).toBeInTheDocument(); }); diff --git a/frontend/src/components/ChatInterface.tsx b/frontend/src/components/ChatInterface.tsx index d8d8fa731d44..19b4be97978d 100644 --- a/frontend/src/components/ChatInterface.tsx +++ b/frontend/src/components/ChatInterface.tsx @@ -1,5 +1,6 @@ import { Card, CardBody } from "@nextui-org/react"; import React, { useEffect, useRef } from "react"; +import { IoMdChatbubbles } from "react-icons/io"; import { useSelector } from "react-redux"; import { useTypingEffect } from "../hooks/useTypingEffect"; import { @@ -116,7 +117,10 @@ function ChatInterface(): JSX.Element { return (
-
Chat
+
+ + Chat +
{initialized ? null : } diff --git a/frontend/src/components/CodeEditor.tsx b/frontend/src/components/CodeEditor.tsx index fc7d7ab3676e..f6cbd0f7aff8 100644 --- a/frontend/src/components/CodeEditor.tsx +++ b/frontend/src/components/CodeEditor.tsx @@ -1,11 +1,10 @@ import Editor, { Monaco } from "@monaco-editor/react"; +import { Tab, Tabs } from "@nextui-org/react"; import type { editor } from "monaco-editor"; import React, { useState } from "react"; -import { Tabs, Tab } from "@nextui-org/react"; import { useSelector } from "react-redux"; import { RootState } from "../store"; import Files from "./Files"; -import { cn } from "../utils/utils"; function CodeEditor(): JSX.Element { const [selectedFileName, setSelectedFileName] = useState("welcome"); @@ -31,27 +30,22 @@ function CodeEditor(): JSX.Element { }; return ( -
-
- -
-
+
+ +
@@ -62,18 +56,17 @@ function CodeEditor(): JSX.Element { : selectedFileName.toLocaleLowerCase() } title={!selectedFileName ? "Welcome" : selectedFileName} - > -
- -
- + />
+
+ +
); diff --git a/frontend/src/components/Files.tsx b/frontend/src/components/Files.tsx index 05273ca10145..57b7ec814462 100644 --- a/frontend/src/components/Files.tsx +++ b/frontend/src/components/Files.tsx @@ -1,20 +1,25 @@ +import { Accordion, AccordionItem } from "@nextui-org/react"; import React, { useEffect } from "react"; -import TreeView, { flattenTree } from "react-accessible-treeview"; +import TreeView, { + ITreeViewOnNodeSelectProps, + flattenTree, +} from "react-accessible-treeview"; import { AiOutlineFolder } from "react-icons/ai"; -import { Accordion, AccordionItem, Button } from "@nextui-org/react"; + import { - TbLayoutSidebarLeftCollapseFilled, - TbLayoutSidebarRightCollapseFilled, -} from "react-icons/tb"; + IoIosArrowDown, + IoIosArrowForward, + IoIosArrowBack, + IoIosRefresh, +} from "react-icons/io"; -import { IoIosArrowDown } from "react-icons/io"; -import { VscRefresh } from "react-icons/vsc"; -import { useSelector } from "react-redux"; +import { useDispatch, useSelector } from "react-redux"; import { getWorkspace, selectFile } from "../services/fileService"; import { setCode, updateWorkspace } from "../state/codeSlice"; -import store, { RootState } from "../store"; -import FolderIcon from "./FolderIcon"; +import { RootState } from "../store"; import FileIcon from "./FileIcons"; +import FolderIcon from "./FolderIcon"; +import IconButton, { IconButtonProps } from "./IconButton"; interface FilesProps { setSelectedFileName: React.Dispatch>; @@ -22,11 +27,38 @@ interface FilesProps { explorerOpen: boolean; } +function RefreshButton({ + onClick, + ariaLabel, +}: Omit): React.ReactElement { + return ( + } + onClick={onClick} + ariaLabel={ariaLabel} + /> + ); +} + +function CloseButton({ + onClick, + ariaLabel, +}: Omit): React.ReactElement { + return ( + } + onClick={onClick} + ariaLabel={ariaLabel} + /> + ); +} + function Files({ setSelectedFileName, setExplorerOpen, explorerOpen, }: FilesProps): JSX.Element { + const dispatch = useDispatch(); const workspaceFolder = useSelector( (state: RootState) => state.code.workspaceFolder, ); @@ -35,7 +67,7 @@ function Files({ const workspaceTree = flattenTree(workspaceFolder); useEffect(() => { - getWorkspace().then((file) => store.dispatch(updateWorkspace(file))); + getWorkspace().then((file) => dispatch(updateWorkspace(file))); }, []); if (workspaceTree.length <= 1) { @@ -46,9 +78,10 @@ function Files({ if (!explorerOpen) { return ( -
-
- +
+ setExplorerOpen(true)} /> @@ -56,8 +89,28 @@ function Files({
); } + + const handleNodeSelect = (node: ITreeViewOnNodeSelectProps) => { + if (!node.isBranch) { + let fullPath = node.element.name; + setSelectedFileName(fullPath); + let currentNode = workspaceTree.find( + (file) => file.id === node.element.id, + ); + while (currentNode !== undefined && currentNode.parent) { + currentNode = workspaceTree.find( + (file) => file.id === node.element.parent, + ); + fullPath = `${currentNode?.name}/${fullPath}`; + } + selectFile(fullPath).then((code) => { + dispatch(setCode(code)); + }); + } + }; + return ( -
+
- {workspaceFolder.name} -
- - + ariaLabel="Close Explorer" + />
} @@ -115,30 +150,13 @@ function Files({
} > -
+
node.id)} - onNodeSelect={(node) => { - if (!node.isBranch) { - let fullPath = node.element.name; - setSelectedFileName(fullPath); - let currentNode = workspaceTree.find( - (file) => file.id === node.element.id, - ); - while (currentNode !== undefined && currentNode.parent) { - currentNode = workspaceTree.find( - (file) => file.id === node.element.parent, - ); - fullPath = `${currentNode!.name}/${fullPath}`; - } - selectFile(fullPath).then((code) => { - store.dispatch(setCode(code)); - }); - } - }} + onNodeSelect={handleNodeSelect} // eslint-disable-next-line react/no-unstable-nested-components nodeRenderer={({ element, @@ -151,9 +169,9 @@ function Files({ // eslint-disable-next-line react/jsx-props-no-spreading {...getNodeProps()} style={{ paddingLeft: 20 * (level - 1) }} - className="cursor-pointer rounded-[5px] p-1 nowrap flex items-center gap-2 aria-selected:bg-neutral-600 aria-selected:text-neutral-50 hover:text-neutral-50" + className="cursor-pointer rounded-[5px] p-1 nowrap flex items-center gap-2 aria-selected:bg-neutral-600 aria-selected:text-white hover:text-white" > -
+
{isBranch ? ( ) : ( diff --git a/frontend/src/components/IconButton.tsx b/frontend/src/components/IconButton.tsx new file mode 100644 index 000000000000..d7e9390269f3 --- /dev/null +++ b/frontend/src/components/IconButton.tsx @@ -0,0 +1,28 @@ +import { Button } from "@nextui-org/react"; +import React, { MouseEventHandler, ReactElement } from "react"; + +export interface IconButtonProps { + icon: ReactElement; + onClick: MouseEventHandler; + ariaLabel: string; +} + +function IconButton({ + icon, + onClick, + ariaLabel, +}: IconButtonProps): React.ReactElement { + return ( + + ); +} + +export default IconButton; diff --git a/frontend/src/components/Input.tsx b/frontend/src/components/Input.tsx index e3a5d037da3d..b872a0c07c53 100644 --- a/frontend/src/components/Input.tsx +++ b/frontend/src/components/Input.tsx @@ -1,15 +1,16 @@ import { Textarea } from "@nextui-org/react"; import React, { ChangeEvent, KeyboardEvent, useState } from "react"; -// import { useTranslation } from "react-i18next"; +import { useTranslation } from "react-i18next"; +import { VscSend } from "react-icons/vsc"; import { useSelector } from "react-redux"; import { twMerge } from "tailwind-merge"; -import i18next from "i18next"; import useInputComposition from "../hooks/useInputComposition"; +import { I18nKey } from "../i18n/declaration"; import { sendChatMessage } from "../services/chatService"; import { RootState } from "../store"; function Input() { - // const { t } = useTranslation(); + const { t } = useTranslation(); const { initialized } = useSelector((state: RootState) => state.task); const [inputMessage, setInputMessage] = useState(""); @@ -46,6 +47,7 @@ function Input() {