diff --git a/.github/.codecov.yml b/.github/.codecov.yml index bebce2bba604..06ea0d7f2e9d 100644 --- a/.github/.codecov.yml +++ b/.github/.codecov.yml @@ -1,6 +1,9 @@ codecov: notify: wait_for_ci: true + # our project is large, so 6 builds are typically uploaded. this waits till 5/6 + # See https://docs.codecov.com/docs/notifications#section-preventing-notifications-until-after-n-builds + after_n_builds: 5 coverage: status: @@ -14,6 +17,3 @@ coverage: comment: false github_checks: annotations: false - -ignore: - - "agenthub/SWE_agent/**" # SWE agent is deprecated diff --git a/.github/ISSUE_TEMPLATE/bug_template.yml b/.github/ISSUE_TEMPLATE/bug_template.yml index 8a645a242875..e0286ea3fcd4 100644 --- a/.github/ISSUE_TEMPLATE/bug_template.yml +++ b/.github/ISSUE_TEMPLATE/bug_template.yml @@ -72,4 +72,4 @@ body: id: additional-context attributes: label: Logs, Errors, Screenshots, and Additional Context - description: LLM logs will be stored in the `logs/llm/default` folder. Please add any additional context about the problem here. + description: If you want to share the chat history you can click the thumbs-down (👎) button above the input field and you will get a shareable link (you can also click thumbs up when things are going well of course!). LLM logs will be stored in the `logs/llm/default` folder. Please add any additional context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md deleted file mode 100644 index d03f14a49adf..000000000000 --- a/.github/ISSUE_TEMPLATE/question.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -name: Question -about: Use this template to ask a question regarding the project. 
-title: '' -labels: question -assignees: '' - ---- - -## Describe your question - - - -## Additional context - - diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 5b87df069c77..2669165c607b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,7 +9,14 @@ updates: directory: "/" # Location of package manifests schedule: interval: "daily" + open-pull-requests-limit: 20 - package-ecosystem: "npm" # See documentation for possible values directory: "/frontend" # Location of package manifests schedule: interval: "daily" + open-pull-requests-limit: 20 + - package-ecosystem: "npm" # See documentation for possible values + directory: "/docs" # Location of package manifests + schedule: + interval: "daily" + open-pull-requests-limit: 20 diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000000..b15e920a2829 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,8 @@ +**What is the problem that this fixes or functionality that this introduces? 
Does it fix any open issues?** + +--- + +**Give a summary of what the PR does, explaining any non-trivial design decisions** + +--- +**Other references** diff --git a/.github/workflows/dummy-agent-test.yml b/.github/workflows/dummy-agent-test.yml index 392876db22bd..8422f0c361ef 100644 --- a/.github/workflows/dummy-agent-test.yml +++ b/.github/workflows/dummy-agent-test.yml @@ -1,4 +1,4 @@ -name: Run e2e test with dummy agent +name: Run E2E test with dummy agent concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -30,4 +30,13 @@ jobs: wget https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/1_Pooling/config.json -P /tmp/llama_index/models--BAAI--bge-small-en-v1.5/snapshots/5c38ec7c405ec4b44b94cc5a9bb96e735b38267a/1_Pooling/ - name: Run tests run: | - poetry run python opendevin/core/main.py -t "do a flip" -m ollama/not-a-model -d ./workspace/ -c DummyAgent + set -e + poetry run python opendevin/core/main.py -t "do a flip" -d ./workspace/ -c DummyAgent + - name: Check exit code + run: | + if [ $? 
-ne 0 ]; then + echo "Test failed" + exit 1 + else + echo "Test passed" + fi diff --git a/.github/workflows/ghcr.yml b/.github/workflows/ghcr.yml index 426a6e47fc1d..43427b641b11 100644 --- a/.github/workflows/ghcr.yml +++ b/.github/workflows/ghcr.yml @@ -1,4 +1,4 @@ -name: Publish Docker Image +name: Build Publish and Test Docker Image concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -7,7 +7,7 @@ concurrency: on: push: branches: - - main + - main tags: - '*' pull_request: @@ -19,19 +19,23 @@ on: default: '' jobs: - ghcr_build_and_push: + ghcr_build: runs-on: ubuntu-latest + outputs: + tags: ${{ steps.capture-tags.outputs.tags }} + permissions: contents: read packages: write strategy: matrix: - image: ["app", "sandbox"] + image: ["sandbox", "opendevin"] + platform: ["amd64", "arm64"] steps: - - name: checkout + - name: Checkout uses: actions/checkout@v4 - name: Free Disk Space (Ubuntu) @@ -40,7 +44,6 @@ jobs: # this might remove tools that are actually needed, # if set to "true" but frees about 6 GB tool-cache: true - # all of these default to true, but feel free to set to # "false" if necessary for your workflow android: true @@ -57,26 +60,204 @@ jobs: id: buildx uses: docker/setup-buildx-action@v3 - - name: Login to ghcr - uses: docker/login-action@v1 + - name: Build and export image + id: build + run: ./containers/build.sh ${{ matrix.image }} ${{ github.repository_owner }} ${{ matrix.platform }} + + - name: Capture tags + id: capture-tags + run: | + tags=$(cat tags.txt) + echo "tags=$tags" + echo "tags=$tags" >> $GITHUB_OUTPUT + + - name: Upload Docker image as artifact + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.image }}-docker-image-${{ matrix.platform }} + path: /tmp/${{ matrix.image }}_image_${{ matrix.platform }}.tar + + test-for-sandbox: + name: Test for Sandbox + runs-on: ubuntu-latest + needs: ghcr_build + env: + PERSIST_SANDBOX: "false" + steps: + - uses: actions/checkout@v4 + + - name: Install poetry via pipx + run: 
pipx install poetry + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: "poetry" + + - name: Install Python dependencies using Poetry + run: make install-python-dependencies + + - name: Download sandbox Docker image + uses: actions/download-artifact@v4 + with: + name: sandbox-docker-image-amd64 + path: /tmp/ + + - name: Load sandbox image and run sandbox tests + run: | + # Load the Docker image and capture the output + output=$(docker load -i /tmp/sandbox_image_amd64.tar) + + # Extract the first image name from the output + image_name=$(echo "$output" | grep -oP 'Loaded image: \K.*' | head -n 1) + + # Print the full name of the image + echo "Loaded Docker image: $image_name" + + SANDBOX_CONTAINER_IMAGE=$image_name TEST_IN_CI=true poetry run pytest --cov=agenthub --cov=opendevin --cov-report=xml -s ./tests/unit/test_sandbox.py + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + + integration-tests-on-linux: + name: Integration Tests on Linux + runs-on: ubuntu-latest + needs: ghcr_build + env: + PERSIST_SANDBOX: "false" + strategy: + fail-fast: false + matrix: + python-version: ["3.11"] + sandbox: ["ssh", "local"] + steps: + - uses: actions/checkout@v4 + + - name: Install poetry via pipx + run: pipx install poetry + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: 'poetry' + + - name: Install Python dependencies using Poetry + run: make install-python-dependencies + + - name: Download sandbox Docker image + uses: actions/download-artifact@v4 + with: + name: sandbox-docker-image-amd64 + path: /tmp/ + + - name: Load sandbox image and run integration tests + env: + SANDBOX_BOX_TYPE: ${{ matrix.sandbox }} + run: | + # Load the Docker image and capture the output + output=$(docker load -i /tmp/sandbox_image_amd64.tar) + + # Extract the first image name from the output + 
image_name=$(echo "$output" | grep -oP 'Loaded image: \K.*' | head -n 1) + + # Print the full name of the image + echo "Loaded Docker image: $image_name" + + SANDBOX_CONTAINER_IMAGE=$image_name TEST_IN_CI=true TEST_ONLY=true ./tests/integration/regenerate.sh + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + + ghcr_push: + runs-on: ubuntu-latest + # don't push if integration tests or sandbox tests fail + needs: [ghcr_build, integration-tests-on-linux, test-for-sandbox] + if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') + + env: + tags: ${{ needs.ghcr_build.outputs.tags }} + + permissions: + contents: read + packages: write + + strategy: + matrix: + image: ["sandbox", "opendevin"] + platform: ["amd64", "arm64"] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Login to GHCR + uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push ${{ matrix.image }} - if: "!github.event.pull_request.head.repo.fork" - run: | - ./containers/build.sh ${{ matrix.image }} ${{ github.repository_owner }} --push + - name: Download Docker images + uses: actions/download-artifact@v4 + with: + name: ${{ matrix.image }}-docker-image-${{ matrix.platform }} + path: /tmp/${{ matrix.platform }} - - name: Build ${{ matrix.image }} - if: "github.event.pull_request.head.repo.fork" + - name: Load images and push to registry run: | - ./containers/build.sh ${{ matrix.image }} ${{ github.repository_owner }} + mv /tmp/${{ matrix.platform }}/${{ matrix.image }}_image_${{ matrix.platform }}.tar . 
+ loaded_image=$(docker load -i ${{ matrix.image }}_image_${{ matrix.platform }}.tar | grep "Loaded image:" | head -n 1 | awk '{print $3}') + echo "loaded image = $loaded_image" + tags=$(echo ${tags} | tr ' ' '\n') + image_name=$(echo "ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}" | tr '[:upper:]' '[:lower:]') + echo "image name = $image_name" + for tag in $tags; do + echo "tag = $tag" + docker tag $loaded_image $image_name:${tag}_${{ matrix.platform }} + docker push $image_name:${tag}_${{ matrix.platform }} + done - docker_build_success: - name: Docker Build Success + create_manifest: runs-on: ubuntu-latest - needs: ghcr_build_and_push + needs: [ghcr_build, ghcr_push] + if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') + + env: + tags: ${{ needs.ghcr_build.outputs.tags }} + + strategy: + matrix: + image: ["sandbox", "opendevin"] + + permissions: + contents: read + packages: write + steps: - - run: echo Done! + - name: Checkout code + uses: actions/checkout@v4 + + - name: Login to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Create and push multi-platform manifest + run: | + image_name=$(echo "ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}" | tr '[:upper:]' '[:lower:]') + echo "image name = $image_name" + tags=$(echo ${tags} | tr ' ' '\n') + for tag in $tags; do + echo 'tag = $tag' + docker buildx imagetools create --tag $image_name:$tag \ + $image_name:${tag}_amd64 \ + $image_name:${tag}_arm64 + done diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 0ff1f383e004..7233bdb25aee 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -47,11 +47,4 @@ jobs: - name: Install pre-commit run: pip install pre-commit==3.7.0 - name: Run pre-commit hooks - if: github.ref != 'refs/heads/main' - run: | - git fetch https://github.com/OpenDevin/OpenDevin.git main:main && \ 
- pre-commit run \ - --files \ - $(git diff --name-only $(git merge-base main $(git branch --show-current)) $(git branch --show-current) | tr '\n' ' ') \ - --show-diff-on-failure \ - --config ./dev_config/python/.pre-commit-config.yaml + run: pre-commit run --files opendevin/**/* agenthub/**/* evaluation/**/* tests/**/* --show-diff-on-failure --config ./dev_config/python/.pre-commit-config.yaml diff --git a/.github/workflows/review-pr.yml b/.github/workflows/review-pr.yml index 28eca24faa11..1a9aeccc391c 100644 --- a/.github/workflows/review-pr.yml +++ b/.github/workflows/review-pr.yml @@ -44,12 +44,24 @@ jobs: echo "" >> task.txt echo "Diff file is: ${{ github.event.pull_request.number }}.diff" >> task.txt + - name: Set up environment + run: | + curl -sSL https://install.python-poetry.org | python3 - + export PATH="/github/home/.local/bin:$PATH" + poetry install --without evaluation + poetry run playwright install --with-deps chromium + - name: Run OpenDevin env: LLM_API_KEY: ${{ secrets.OPENAI_API_KEY }} - SANDBOX_TYPE: exec + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + SANDBOX_BOX_TYPE: ssh run: | - WORKSPACE_MOUNT_PATH=$GITHUB_WORKSPACE python ./opendevin/core/main.py -i 50 -f task.txt -d $GITHUB_WORKSPACE + # Append path to launch poetry + export PATH="/github/home/.local/bin:$PATH" + # Append path to correctly import package, note: must set pwd at first + export PYTHONPATH=$(pwd):$PYTHONPATH + WORKSPACE_MOUNT_PATH=$GITHUB_WORKSPACE poetry run python ./opendevin/core/main.py -i 50 -f task.txt -d $GITHUB_WORKSPACE rm task.txt - name: Check if review file is non-empty diff --git a/.github/workflows/run-integration-tests.yml b/.github/workflows/run-integration-tests.yml deleted file mode 100644 index 3f0f80e3e580..000000000000 --- a/.github/workflows/run-integration-tests.yml +++ /dev/null @@ -1,104 +0,0 @@ -name: Run Integration Tests - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: ${{ github.ref != 'refs/heads/main' 
}} - -on: - push: - branches: - - main - paths-ignore: - - '**/*.md' - - 'frontend/**' - - 'docs/**' - - 'evaluation/**' - pull_request: - -env: - PERSIST_SANDBOX : "false" - -jobs: - integration-tests-on-linux: - name: Integration Tests on Linux - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - python-version: ["3.11"] - sandbox: ["ssh", "exec", "local"] - steps: - - uses: actions/checkout@v4 - - - name: Install poetry via pipx - run: pipx install poetry - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - cache: 'poetry' - - - name: Install Python dependencies using Poetry - run: poetry install - - - name: Build Environment - run: make build - - - name: Run Integration Tests - env: - SANDBOX_TYPE: ${{ matrix.sandbox }} - run: | - TEST_IN_CI=true TEST_ONLY=true ./tests/integration/regenerate.sh - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4 - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - - integration-tests-on-mac: - name: Integration Tests on MacOS - runs-on: macos-13 - if: contains(github.event.pull_request.title, 'mac') || contains(github.event.pull_request.title, 'Mac') - strategy: - fail-fast: false - matrix: - python-version: ["3.11"] - sandbox: ["ssh"] - steps: - - uses: actions/checkout@v4 - - - name: Install poetry via pipx - run: pipx install poetry - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - cache: 'poetry' - - - name: Install Python dependencies using Poetry - run: poetry install - - - name: Install & Start Docker - run: | - brew install colima docker - colima start - - # For testcontainers to find the Colima socket - # https://github.com/abiosoft/colima/blob/main/docs/FAQ.md#cannot-connect-to-the-docker-daemon-at-unixvarrundockersock-is-the-docker-daemon-running - sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock - - - name: Build Environment - run: make 
build - - - name: Run Integration Tests - env: - SANDBOX_TYPE: ${{ matrix.sandbox }} - run: | - TEST_IN_CI=true TEST_ONLY=true ./tests/integration/regenerate.sh - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4 - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/run-unit-tests.yml b/.github/workflows/run-unit-tests.yml index f6df12579f0a..8f17702f2c27 100644 --- a/.github/workflows/run-unit-tests.yml +++ b/.github/workflows/run-unit-tests.yml @@ -19,11 +19,40 @@ env: PERSIST_SANDBOX : "false" jobs: + fe-test: + runs-on: ubuntu-latest + + strategy: + matrix: + node-version: [20] + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + + - name: Install dependencies + working-directory: ./frontend + run: npm ci + + - name: Run tests and collect coverage + working-directory: ./frontend + run: npm run test:coverage + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + test-on-macos: name: Test on macOS - runs-on: macos-13 + runs-on: macos-12 env: - INSTALL_DOCKER: "0" # Set to '0' to skip Docker installation + INSTALL_DOCKER: "1" # Set to '0' to skip Docker installation strategy: matrix: python-version: ["3.11"] @@ -46,8 +75,37 @@ jobs: - name: Install & Start Docker if: env.INSTALL_DOCKER == '1' run: | - brew install colima docker - colima start + # Uninstall colima to upgrade to the latest version + if brew list colima &>/dev/null; then + brew uninstall colima + # unlinking colima dependency: go + brew uninstall go@1.21 + fi + rm -rf ~/.colima ~/.lima + brew install --HEAD colima + brew services start colima + brew install docker + colima delete + # Attempt to start Colima + ATTEMPT_LIMIT=3 + + start_colima() { + colima start --network-address --arch x86_64 --cpu=1 --memory=1 + } + + for i in $(seq 1 $ATTEMPT_LIMIT); do + if 
start_colima; then + echo "Colima started successfully." + break + else + echo "Failed to start Colima. Attempt $i/$ATTEMPT_LIMIT." + fi + done + + if [ $i -gt $ATTEMPT_LIMIT ]; then + echo "Failed to start Colima after $ATTEMPT_LIMIT attempts." + exit 1 + fi # For testcontainers to find the Colima socket # https://github.com/abiosoft/colima/blob/main/docs/FAQ.md#cannot-connect-to-the-docker-daemon-at-unixvarrundockersock-is-the-docker-daemon-running @@ -97,33 +155,3 @@ jobs: uses: codecov/codecov-action@v4 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - - test-for-sandbox: - name: Test for Sandbox - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Install poetry via pipx - run: pipx install poetry - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.11" - cache: "poetry" - - - name: Install Python dependencies using Poetry - run: poetry install - - - name: Build Environment - run: make build - - - name: Run Integration Test for Sandbox - run: | - poetry run pytest --cov=agenthub --cov=opendevin --cov-report=xml -s ./tests/unit/test_sandbox.py - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4 - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/solve-issue.yml b/.github/workflows/solve-issue.yml index a193075a2ff0..df965b95542e 100644 --- a/.github/workflows/solve-issue.yml +++ b/.github/workflows/solve-issue.yml @@ -36,14 +36,27 @@ jobs: echo "BODY:" >> task.txt echo "${ISSUE_BODY}" >> task.txt + - name: Set up environment + run: | + curl -sSL https://install.python-poetry.org | python3 - + export PATH="/github/home/.local/bin:$PATH" + poetry install --without evaluation + poetry run playwright install --with-deps chromium + + - name: Run OpenDevin env: ISSUE_TITLE: ${{ github.event.issue.title }} ISSUE_BODY: ${{ github.event.issue.body }} LLM_API_KEY: ${{ secrets.OPENAI_API_KEY }} - SANDBOX_TYPE: exec + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} 
+ SANDBOX_BOX_TYPE: ssh run: | - WORKSPACE_MOUNT_PATH=$GITHUB_WORKSPACE python ./opendevin/core/main.py -i 50 -f task.txt -d $GITHUB_WORKSPACE + # Append path to launch poetry + export PATH="/github/home/.local/bin:$PATH" + # Append path to correctly import package, note: must set pwd at first + export PYTHONPATH=$(pwd):$PYTHONPATH + WORKSPACE_MOUNT_PATH=$GITHUB_WORKSPACE poetry run python ./opendevin/core/main.py -i 50 -f task.txt -d $GITHUB_WORKSPACE rm task.txt - name: Setup Git, Create Branch, and Commit Changes diff --git a/.github/workflows/update-pyproject-version.yml b/.github/workflows/update-pyproject-version.yml new file mode 100644 index 000000000000..24fa5429c557 --- /dev/null +++ b/.github/workflows/update-pyproject-version.yml @@ -0,0 +1,48 @@ +name: Update pyproject.toml Version and Tags + +on: + release: + types: + - published + +jobs: + update-pyproject-and-tags: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Fetch all history for all branches and tags + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install toml + + - name: Get release tag + id: get_release_tag + run: echo "RELEASE_TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV + + - name: Update pyproject.toml with release tag + run: | + python -c " + import toml + with open('pyproject.toml', 'r') as f: + data = toml.load(f) + data['tool']['poetry']['version'] = '${{ env.RELEASE_TAG }}' + with open('pyproject.toml', 'w') as f: + toml.dump(data, f) + " + + - name: Commit and push pyproject.toml changes + uses: stefanzweifel/git-auto-commit-action@v4 + with: + commit_message: "Update pyproject.toml version to ${{ env.RELEASE_TAG }}" + branch: main + file_pattern: pyproject.toml diff --git a/.gitignore b/.gitignore index 5f670fbf2a41..daa5a473580d 100644 --- a/.gitignore +++ b/.gitignore @@ -161,9 +161,14 @@ 
cython_debug/ # option (not recommended) you can uncomment the following to ignore the entire idea folder. .idea/ .vscode/ +.cursorignore # evaluation +evaluation/evaluation_outputs +evaluation/outputs +evaluation/swe_bench/eval_workspace* evaluation/SWE-bench/data +evaluation/webarena/scripts/webarena_env.sh # frontend @@ -176,6 +181,8 @@ frontend/yarn.lock # testing frontend/coverage +test_results* +/_test_files_tmp/ # production frontend/build @@ -204,8 +211,9 @@ cache # configuration config.toml config.toml.bak -evaluation/swe_bench/eval_workspace* -evaluation/outputs -evaluation/evaluation_outputs -test_results* -/_test_files_tmp/ + +containers/agnostic_sandbox + +# swe-bench-eval +image_build_logs +run_instance_logs diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 1f5f3a2c1964..8c66075aef23 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -61,7 +61,7 @@ representative at an online or offline event. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at -contact@rbren.io +contact@all-hands.dev All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 96e82facc3a0..6a0849d24dee 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,13 +1,32 @@ # Contributing -Thanks for your interest in contributing to OpenDevin! We welcome and appreciate contributions. -To report bugs, create a [GitHub issue](https://github.com/OpenDevin/OpenDevin/issues/new/choose). +Thanks for your interest in contributing to OpenDevin! We welcome and appreciate contributions. -## Contribution Guide -### 1. Fork the Official Repository +## How Can I Contribute? + +There are many ways that you can contribute: + +1. 
**Download and use** OpenDevin, and send [issues](https://github.com/OpenDevin/OpenDevin/issues) when you encounter something that isn't working or a feature that you'd like to see. +2. **Send feedback** after each session by [clicking the thumbs-up thumbs-down buttons](https://opendevin.github.io/OpenDevin/modules/usage/feedback), so we can see where things are working and failing, and also build an open dataset for training code agents. +3. **Improve the Codebase** by sending PRs (see details below). In particular, we have some [good first issue](https://github.com/OpenDevin/OpenDevin/labels/good%20first%20issue) issues that may be ones to start on. -Fork [OpenDevin repository](https://github.com/OpenDevin/OpenDevin) into your own account. -Clone your own forked repository into your local environment. +## Understanding OpenDevin's CodeBase + +To understand the codebase, please refer to the README in each module: +- [frontend](./frontend/README.md) +- [agenthub](./agenthub/README.md) +- [evaluation](./evaluation/README.md) +- [opendevin](./opendevin/README.md) + - [server](./opendevin/server/README.md) + +When you write code, it is also good to write tests. Please navigate to the `tests` folder to see existing test suites. +At the moment, we have two kinds of tests: `unit` and `integration`. Please refer to the README for each test suite. These tests also run on GitHub's continuous integration to ensure quality of the project. + +## Sending Pull Requests to OpenDevin + +### 1. Fork the Official Repository +Fork the [OpenDevin repository](https://github.com/OpenDevin/OpenDevin) into your own account. +Clone your own forked repository into your local environment: ```shell git clone git@github.com:/OpenDevin.git @@ -16,21 +35,23 @@ git clone git@github.com:/OpenDevin.git ### 2. Configure Git Set the official repository as your [upstream](https://www.atlassian.com/git/tutorials/git-forks-and-upstreams) to synchronize with the latest update in the official repository. 
-Add the original repository as upstream +Add the original repository as upstream: ```shell cd OpenDevin git remote add upstream git@github.com:OpenDevin/OpenDevin.git ``` -Verify that the remote is set. +Verify that the remote is set: + ```shell git remote -v ``` + You should see both `origin` and `upstream` in the output. ### 3. Synchronize with Official Repository -Synchronize latest commit with official repository before coding. +Synchronize latest commit with official repository before coding: ```shell git fetch upstream @@ -39,25 +60,48 @@ git merge upstream/main git push origin main ``` -### 4. Create a New Branch And Open a Pull Request -After you finish implementation, open forked repository. The source branch is your new branch, and the target branch is `OpenDevin/OpenDevin` `main` branch. Then PR should appears in [OpenDevin PRs](https://github.com/OpenDevin/OpenDevin/pulls). +### 4. Set up the Development Environment + +We have a separate doc [Development.md](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) that tells you how to set up a development workflow. + +### 5. Write Code and Commit It -Then OpenDevin team will review your code. +Once you have done this, you can write code, test it, and commit it to a branch (replace `my_branch` with an appropriate name): + +```shell +git checkout -b my_branch +git add . +git commit +git push origin my_branch +``` + +### 6. Open a Pull Request + +* On GitHub, go to the page of your forked repository, and create a Pull Request: + - Click on `Branches` + - Click on the `...` beside your branch and click on `New pull request` + - Set `base repository` to `OpenDevin/OpenDevin` + - Set `base` to `main` + - Click `Create pull request` + +The PR should appear in [OpenDevin PRs](https://github.com/OpenDevin/OpenDevin/pulls). + +Then the OpenDevin team will review your code. ## PR Rules ### 1. 
Pull Request title - -As described in [here](https://github.com/commitizen/conventional-commit-types/blob/master/index.json), a valid PR title should begin with one of the following prefixes: +As described [here](https://github.com/commitizen/conventional-commit-types/blob/master/index.json), a valid PR title should begin with one of the following prefixes: - `feat`: A new feature - `fix`: A bug fix -- `doc`: Documentation only changes +- `docs`: Documentation only changes +- `style`: Changes that do not affect the meaning of the code (white space, formatting, missing semicolons, etc.) - `refactor`: A code change that neither fixes a bug nor adds a feature -- `style`: A refactoring that improves code style - `perf`: A code change that improves performance - `test`: Adding missing tests or correcting existing tests -- `ci`: Changes to CI configuration files and scripts (example scopes: `.github`, `ci` (Buildkite)) +- `build`: Changes that affect the build system or external dependencies (example scopes: gulp, broccoli, npm) +- `ci`: Changes to our CI configuration files and scripts (example scopes: Travis, Circle, BrowserStack, SauceLabs) - `chore`: Other changes that don't modify src or test files - `revert`: Reverts a previous commit @@ -67,24 +111,7 @@ For example, a PR title could be: You may also check out previous PRs in the [PR list](https://github.com/OpenDevin/OpenDevin/pulls). -As described in [here](https://github.com/OpenDevin/OpenDevin/labels), we create several labels. Every PR should be tagged with the corresponding labels. - ### 2. Pull Request description - - If your PR is small (such as a typo fix), you can go brief. -- If it is large and you have changed a lot, it's better to write more details. 
- - -## How to begin -Please refer to the README in each module: -- [frontend](./frontend/README.md) -- [agenthub](./agenthub/README.md) -- [evaluation](./evaluation/README.md) -- [opendevin](./opendevin/README.md) - - [server](./opendevin/server/README.md) - - [mock server](./opendevin/mock/README.md) +- If it contains a lot of changes, it's better to write more details. -## Tests -Please navigate to `tests` folder to see existing test suites. -At the moment, we have two kinds of tests: `unit` and `integration`. Please refer to the README for each test suite. These tests also run on CI to ensure quality of -the project. diff --git a/Development.md b/Development.md index babb42ab8f68..69c6d03dc0b7 100644 --- a/Development.md +++ b/Development.md @@ -1,14 +1,16 @@ # Development Guide This guide is for people working on OpenDevin and editing the source code. +If you wish to contribute your changes, check out the [CONTRIBUTING.md](https://github.com/OpenDevin/OpenDevin/blob/main/CONTRIBUTING.md) on how to clone and setup the project initially before moving on. +Otherwise, you can clone the OpenDevin project directly. ## Start the server for development - ### 1. Requirements -* Linux, Mac OS, or [WSL on Windows](https://learn.microsoft.com/en-us/windows/wsl/install) +* Linux, Mac OS, or [WSL on Windows](https://learn.microsoft.com/en-us/windows/wsl/install) [ Ubuntu <= 22.04] * [Docker](https://docs.docker.com/engine/install/) (For those on MacOS, make sure to allow the default Docker socket to be used from advanced settings!) * [Python](https://www.python.org/downloads/) = 3.11 * [NodeJS](https://nodejs.org/en/download/package-manager) >= 18.17.1 * [Poetry](https://python-poetry.org/docs/#installing-with-the-official-installer) >= 1.8 +* netcat => sudo apt-get install netcat Make sure you have all these dependencies installed before moving on to `make build`. @@ -27,41 +29,40 @@ mamba install conda-forge::poetry ``` ### 2. 
Build and Setup The Environment +Begin by building the project which includes setting up the environment and installing dependencies. This step ensures that OpenDevin is ready to run on your system: -- **Build the Project:** Begin by building the project, which includes setting up the environment and installing dependencies. This step ensures that OpenDevin is ready to run smoothly on your system. - ```bash - make build - ``` +```bash +make build +``` ### 3. Configuring the Language Model - OpenDevin supports a diverse array of Language Models (LMs) through the powerful [litellm](https://docs.litellm.ai) library. By default, we've chosen the mighty GPT-4 from OpenAI as our go-to model, but the world is your oyster! You can unleash the potential of Anthropic's suave Claude, the enigmatic Llama, or any other LM that piques your interest. -To configure the LM of your choice, follow these steps: - -1. **Using the Makefile: The Effortless Approach** - With a single command, you can have a smooth LM setup for your OpenDevin experience. Simply run: +To configure the LM of your choice, run: + ```bash make setup-config ``` + This command will prompt you to enter the LLM API key, model name, and other variables ensuring that OpenDevin is tailored to your specific needs. Note that the model name will apply only when you run headless. If you use the UI, please set the model in the UI. + + Note: If you have previously run OpenDevin using the docker command, you may have already set some environmental variables in your terminal. The final configurations are set from highest to lowest priority: + Environment variables > config.toml variables > default variables **Note on Alternative Models:** -Some alternative models may prove more challenging to tame than others. Fear not, brave adventurer! We shall soon unveil LLM-specific documentation to guide you on your quest. 
And if you've already mastered the art of wielding a model other than OpenAI's GPT, we encourage you to [share your setup instructions with us](https://github.com/OpenDevin/OpenDevin/issues/417). +Some alternative models may prove more challenging to tame than others. Fear not, brave adventurer! We shall soon unveil LLM-specific documentation to guide you on your quest. +And if you've already mastered the art of wielding a model other than OpenAI's GPT, we encourage you to share your setup instructions with us by creating instructions and adding it [to our documentation](https://github.com/OpenDevin/OpenDevin/tree/main/docs/modules/usage/llms). For a full list of the LM providers and models available, please consult the [litellm documentation](https://docs.litellm.ai/docs/providers). -There is also [documentation for running with local models using ollama](./docs/documentation/LOCAL_LLM_GUIDE.md). - -### 4. Run the Application - -- **Run the Application:** Once the setup is complete, launching OpenDevin is as simple as running a single command. This command starts both the backend and frontend servers seamlessly, allowing you to interact with OpenDevin without any hassle. - ```bash - make run - ``` - -### 5. Individual Server Startup +### 4. Running the application +#### Option A: Run the Full Application +Once the setup is complete, launching OpenDevin is as simple as running a single command. This command starts both the backend and frontend servers seamlessly, allowing you to interact with OpenDevin: +```bash +make run +``` +#### Option B: Individual Server Startup - **Start the Backend Server:** If you prefer, you can start the backend server independently to focus on backend-related tasks or configurations. ```bash make start-backend @@ -73,29 +74,26 @@ There is also [documentation for running with local models using ollama](./docs/ ``` ### 6. 
LLM Debugging - -If you encounter any issues with the Language Model (LM) or you're simply curious, you can inspect the actual LLM prompts and responses. To do so, export DEBUG=1 in the environment and restart the backend. OpenDevin will then log the prompts and responses in the logs/llm/CURRENT_DATE directory, allowing you to identify the causes. +If you encounter any issues with the Language Model (LM) or you're simply curious, you can inspect the actual LLM prompts and responses. To do so, export DEBUG=1 in the environment and restart the backend. +OpenDevin will then log the prompts and responses in the logs/llm/CURRENT_DATE directory, allowing you to identify the causes. ### 7. Help - -- **Get Some Help:** Need assistance or information on available targets and commands? The help command provides all the necessary guidance to ensure a smooth experience with OpenDevin. - ```bash - make help - ``` +Need assistance or information on available targets and commands? The help command provides all the necessary guidance to ensure a smooth experience with OpenDevin. +```bash +make help + ``` ### 8. Testing - +To run tests, refer to the following: #### Unit tests ```bash -poetry run pytest ./tests/unit/test_sandbox.py +poetry run pytest ./tests/unit/test_*.py ``` #### Integration tests - Please refer to [this README](./tests/integration/README.md) for details. ### 9. Add or update dependency - -1. Add your dependency in `pyproject.toml` or use `peotry add xxx` +1. Add your dependency in `pyproject.toml` or use `poetry add xxx` 2. Update the poetry.lock file via `poetry lock --no-update` diff --git a/ISSUE_TRIAGE.md b/ISSUE_TRIAGE.md new file mode 100644 index 000000000000..f251ad7263fb --- /dev/null +++ b/ISSUE_TRIAGE.md @@ -0,0 +1,25 @@ +# Issue Triage +These are the procedures and guidelines on how issues are triaged in this repo by the maintainers. 
+ +## General +* Most issues must be tagged with **enhancement** or **bug** +* Issues may be tagged with what it relates to (**backend**, **frontend**, **agent quality**, etc.) + +## Severity +* **Low**: Minor issues, single user report +* **Medium**: Affecting multiple users +* **Critical**: Affecting all users or potential security issues + +## Effort +* Issues may be estimated with effort required (**small effort**, **medium effort**, **large effort**) + +## Difficulty +* Issues with low implementation difficulty may be tagged with **good first issue** + +## Not Enough Information +* User is asked to provide more information (logs, how to reproduce, etc.) when the issue is not clear +* If an issue is unclear and the author does not provide more information or respond to a request, the issue may be closed as **not planned** (Usually after a week) + +## Multiple Requests/Fixes in One Issue +* These issues will be narrowed down to one request/fix so the issue is more easily tracked and fixed +* Issues may be broken down into multiple issues if required diff --git a/Makefile b/Makefile index 49ad4af5572c..2e06fa01d2a8 100644 --- a/Makefile +++ b/Makefile @@ -2,14 +2,15 @@ SHELL=/bin/bash # Makefile for OpenDevin project # Variables -DOCKER_IMAGE = ghcr.io/opendevin/sandbox +DOCKER_IMAGE = ghcr.io/opendevin/sandbox:main BACKEND_PORT = 3000 BACKEND_HOST = "127.0.0.1:$(BACKEND_PORT)" FRONTEND_PORT = 3001 DEFAULT_WORKSPACE_DIR = "./workspace" -DEFAULT_MODEL = "gpt-3.5-turbo" +DEFAULT_MODEL = "gpt-4o" CONFIG_FILE = config.toml -PRECOMMIT_CONFIG_PATH = "./dev_config/python/.pre-commit-config.yaml" +PRE_COMMIT_CONFIG_PATH = "./dev_config/python/.pre-commit-config.yaml" +PYTHON_VERSION = 3.11 # ANSI color codes GREEN=$(shell tput -Txterm setaf 2) @@ -27,7 +28,7 @@ ifeq ($(INSTALL_DOCKER),) endif @$(MAKE) -s install-python-dependencies @$(MAKE) -s install-frontend-dependencies - @$(MAKE) -s install-precommit-hooks + @$(MAKE) -s install-pre-commit-hooks @$(MAKE) -s 
build-frontend @echo "$(GREEN)Build completed successfully.$(RESET)" @@ -62,10 +63,10 @@ check-system: check-python: @echo "$(YELLOW)Checking Python installation...$(RESET)" - @if command -v python3.11 > /dev/null; then \ - echo "$(BLUE)$(shell python3.11 --version) is already installed.$(RESET)"; \ + @if command -v python$(PYTHON_VERSION) > /dev/null; then \ + echo "$(BLUE)$(shell python$(PYTHON_VERSION) --version) is already installed.$(RESET)"; \ else \ - echo "$(RED)Python 3.11 is not installed. Please install Python 3.11 to continue.$(RESET)"; \ + echo "$(RED)Python $(PYTHON_VERSION) is not installed. Please install Python $(PYTHON_VERSION) to continue.$(RESET)"; \ exit 1; \ fi @@ -112,13 +113,13 @@ check-poetry: echo "$(BLUE)$(shell poetry --version) is already installed.$(RESET)"; \ else \ echo "$(RED)Poetry 1.8 or later is required. You can install poetry by running the following command, then adding Poetry to your PATH:"; \ - echo "$(RED) curl -sSL https://install.python-poetry.org | python3 -$(RESET)"; \ + echo "$(RED) curl -sSL https://install.python-poetry.org | python$(PYTHON_VERSION) -$(RESET)"; \ echo "$(RED)More detail here: https://python-poetry.org/docs/#installing-with-the-official-installer$(RESET)"; \ exit 1; \ fi; \ else \ echo "$(RED)Poetry is not installed. 
You can install poetry by running the following command, then adding Poetry to your PATH:"; \ - echo "$(RED) curl -sSL https://install.python-poetry.org | python3.11 -$(RESET)"; \ + echo "$(RED) curl -sSL https://install.python-poetry.org | python$(PYTHON_VERSION) -$(RESET)"; \ echo "$(RED)More detail here: https://python-poetry.org/docs/#installing-with-the-official-installer$(RESET)"; \ exit 1; \ fi @@ -130,7 +131,11 @@ pull-docker-image: install-python-dependencies: @echo "$(GREEN)Installing Python dependencies...$(RESET)" - poetry env use python3.11 + @if [ -z "${TZ}" ]; then \ + echo "Defaulting TZ (timezone) to UTC"; \ + export TZ="UTC"; \ + fi + poetry env use python$(PYTHON_VERSION) @if [ "$(shell uname)" = "Darwin" ]; then \ echo "$(BLUE)Installing chroma-hnswlib...$(RESET)"; \ export HNSWLIB_NO_NATIVE=1; \ @@ -142,7 +147,14 @@ install-python-dependencies: poetry run pip install playwright; \ poetry run playwright install chromium; \ else \ - poetry run playwright install --with-deps chromium; \ + if [ ! -f cache/playwright_chromium_is_installed.txt ]; then \ + echo "Running playwright install --with-deps chromium..."; \ + poetry run playwright install --with-deps chromium; \ + mkdir -p cache; \ + touch cache/playwright_chromium_is_installed.txt; \ + else \ + echo "Setup already done. 
Skipping playwright installation."; \ + fi \ fi @echo "$(GREEN)Python dependencies installed successfully.$(RESET)" @@ -150,22 +162,19 @@ install-frontend-dependencies: @echo "$(YELLOW)Setting up frontend environment...$(RESET)" @echo "$(YELLOW)Detect Node.js version...$(RESET)" @cd frontend && node ./scripts/detect-node-version.js - @cd frontend && \ - echo "$(BLUE)Installing frontend dependencies with npm...$(RESET)" && \ - npm install && \ - echo "$(BLUE)Running make-i18n with npm...$(RESET)" && \ - npm run make-i18n + echo "$(BLUE)Installing frontend dependencies with npm...$(RESET)" + @cd frontend && npm install @echo "$(GREEN)Frontend dependencies installed successfully.$(RESET)" -install-precommit-hooks: +install-pre-commit-hooks: @echo "$(YELLOW)Installing pre-commit hooks...$(RESET)" @git config --unset-all core.hooksPath || true - @poetry run pre-commit install --config $(PRECOMMIT_CONFIG_PATH) + @poetry run pre-commit install --config $(PRE_COMMIT_CONFIG_PATH) @echo "$(GREEN)Pre-commit hooks installed successfully.$(RESET)" lint-backend: @echo "$(YELLOW)Running linters...$(RESET)" - @poetry run pre-commit run --files $$(git diff --name-only $$(git merge-base main $$(git branch --show-current)) $$(git branch --show-current) | tr '\n' ' ') --show-diff-on-failure --config $(PRECOMMIT_CONFIG_PATH) + @poetry run pre-commit run --files opendevin/**/* agenthub/**/* evaluation/**/* --show-diff-on-failure --config $(PRE_COMMIT_CONFIG_PATH) lint-frontend: @echo "$(YELLOW)Running linters for frontend...$(RESET)" @@ -196,11 +205,10 @@ start-frontend: @echo "$(YELLOW)Starting frontend...$(RESET)" @cd frontend && VITE_BACKEND_HOST=$(BACKEND_HOST) VITE_FRONTEND_PORT=$(FRONTEND_PORT) npm run start -# Run the app -run: - @echo "$(YELLOW)Running the app...$(RESET)" +# Common setup for running the app (non-callable) +_run_setup: @if [ "$(OS)" = "Windows_NT" ]; then \ - echo "$(RED)`make run` is not supported on Windows. 
Please run `make start-frontend` and `make start-backend` separately.$(RESET)"; \ + echo "$(RED) Windows is not supported, use WSL instead!$(RESET)"; \ exit 1; \ fi @mkdir -p logs @@ -209,9 +217,21 @@ run: @echo "$(YELLOW)Waiting for the backend to start...$(RESET)" @until nc -z localhost $(BACKEND_PORT); do sleep 0.1; done @echo "$(GREEN)Backend started successfully.$(RESET)" + +# Run the app (standard mode) +run: + @echo "$(YELLOW)Running the app...$(RESET)" + @$(MAKE) -s _run_setup @cd frontend && echo "$(BLUE)Starting frontend with npm...$(RESET)" && npm run start -- --port $(FRONTEND_PORT) @echo "$(GREEN)Application started successfully.$(RESET)" +# Run the app (WSL mode) +run-wsl: + @echo "$(YELLOW)Running the app in WSL mode...$(RESET)" + @$(MAKE) -s _run_setup + @cd frontend && echo "$(BLUE)Starting frontend with npm (WSL mode)...$(RESET)" && npm run dev_wsl -- --port $(FRONTEND_PORT) + @echo "$(GREEN)Application started successfully in WSL mode.$(RESET)" + # Setup config.toml setup-config: @echo "$(YELLOW)Setting up config.toml...$(RESET)" @@ -222,10 +242,20 @@ setup-config: setup-config-prompts: @echo "[core]" > $(CONFIG_FILE).tmp - @read -p "Enter your workspace directory [default: $(DEFAULT_WORKSPACE_DIR)]: " workspace_dir; \ + @read -p "Enter your workspace directory (as absolute path) [default: $(DEFAULT_WORKSPACE_DIR)]: " workspace_dir; \ workspace_dir=$${workspace_dir:-$(DEFAULT_WORKSPACE_DIR)}; \ echo "workspace_base=\"$$workspace_dir\"" >> $(CONFIG_FILE).tmp + @read -p "Do you want to persist the sandbox container? 
[true/false] [default: false]: " persist_sandbox; \ + persist_sandbox=$${persist_sandbox:-false}; \ + if [ "$$persist_sandbox" = "true" ]; then \ + read -p "Enter a password for the sandbox container: " ssh_password; \ + echo "ssh_password=\"$$ssh_password\"" >> $(CONFIG_FILE).tmp; \ + echo "persist_sandbox=$$persist_sandbox" >> $(CONFIG_FILE).tmp; \ + else \ + echo "persist_sandbox=$$persist_sandbox" >> $(CONFIG_FILE).tmp; \ + fi + @echo "" >> $(CONFIG_FILE).tmp @echo "[llm]" >> $(CONFIG_FILE).tmp @@ -286,4 +316,4 @@ help: @echo " $(GREEN)help$(RESET) - Display this help message, providing information on available targets." # Phony targets -.PHONY: build check-dependencies check-python check-npm check-docker check-poetry pull-docker-image install-python-dependencies install-frontend-dependencies install-precommit-hooks lint start-backend start-frontend run setup-config setup-config-prompts help +.PHONY: build check-dependencies check-python check-npm check-docker check-poetry pull-docker-image install-python-dependencies install-frontend-dependencies install-pre-commit-hooks lint start-backend start-frontend run run-wsl setup-config setup-config-prompts help diff --git a/README.md b/README.md index f89ec14689c7..0057a2eb3d2d 100644 --- a/README.md +++ b/README.md @@ -26,8 +26,6 @@
Join our Slack community Join our Discord community -
- SWE-bench CodeCov @@ -35,7 +33,8 @@
Logo

OpenDevin: Code Less, Make More

- Check out the documentation + Check out the documentation + Evaluation Benchmark

@@ -46,39 +45,49 @@ OpenDevin agents collaborate with human developers to write code, fix bugs, and ![App screenshot](./docs/static/img/screenshot.png) ## ⚡ Getting Started -The easiest way to run OpenDevin is inside a Docker container. It works best with the most recent version of Docker, `26.0.0`. +OpenDevin works best with Docker version 26.0.0+ (Docker Desktop 4.31.0+). You must be using Linux, Mac OS, or WSL on Windows. -To start the app, run these commands, replacing `$(pwd)/workspace` with the directory you want OpenDevin to work with. +To start OpenDevin in a docker container, run the following commands in your terminal: > [!WARNING] -> OpenDevin runs bash commands within a Docker sandbox, so it should not affect your machine. -> But your workspace directory will be attached to that sandbox, and files in the directory may be modified or deleted. - -```bash -# The directory you want OpenDevin to work with. MUST be an absolute path! -export WORKSPACE_BASE=$(pwd)/workspace; -``` +> When you run the following command, files in `./workspace` may be modified or deleted. ```bash +WORKSPACE_BASE=$(pwd)/workspace docker run -it \ --pull=always \ -e SANDBOX_USER_ID=$(id -u) \ - -e SSH_PASSWORD="make something up here" \ -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ -v $WORKSPACE_BASE:/opt/workspace_base \ -v /var/run/docker.sock:/var/run/docker.sock \ -p 3000:3000 \ --add-host host.docker.internal:host-gateway \ - ghcr.io/opendevin/opendevin:0.5 + --name opendevin-app-$(date +%Y%m%d%H%M%S) \ + ghcr.io/opendevin/opendevin ``` -You'll find OpenDevin running at [http://localhost:3000](http://localhost:3000). +> [!NOTE] +> By default, this command pulls the `latest` tag, which represents the most recent release of OpenDevin. You have other options as well: +> - For a specific release version, use `ghcr.io/opendevin/opendevin:` (replace with the desired version number). +> - For the most up-to-date development version, use `ghcr.io/opendevin/opendevin:main`. 
This version may be **(unstable!)** and is recommended for testing or development purposes only. +> +> Choose the tag that best suits your needs based on stability requirements and desired features. + +You'll find OpenDevin running at [http://localhost:3000](http://localhost:3000) with access to `./workspace`. To have OpenDevin operate on your code, place it in `./workspace`. +OpenDevin will only have access to this workspace folder. The rest of your system will not be affected as it runs in a secured docker sandbox. + +Upon opening OpenDevin, you must select the appropriate `Model` and enter the `API Key` within the settings that should pop up automatically. These can be set at any time by selecting +the `Settings` button (gear icon) in the UI. If the required `Model` does not exist in the list, you can manually enter it in the text box. + +For the development workflow, see [Development.md](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md). + +Are you having trouble? Check out our [Troubleshooting Guide](https://opendevin.github.io/OpenDevin/modules/usage/troubleshooting). ## 🚀 Documentation To learn more about the project, and for tips on using OpenDevin, -**check out our [documentation](https://opendevin.github.io/OpenDevin/)**. +**check out our [documentation](https://opendevin.github.io/OpenDevin/modules/usage/intro)**. There you'll find resources on how to use different LLM providers (like ollama and Anthropic's Claude), troubleshooting resources, and advanced configuration options. @@ -100,15 +109,10 @@ For details, please check [CONTRIBUTING.md](./CONTRIBUTING.md). Whether you're a developer, a researcher, or simply enthusiastic about OpenDevin, we'd love to have you in our community. Let's make software engineering better together! -- [Slack workspace](https://join.slack.com/t/opendevin/shared_invite/zt-2ggtwn3k5-PvAA2LUmqGHVZ~XzGq~ILw) - Here we talk about research, architecture, and future development. 
+- [Slack workspace](https://join.slack.com/t/opendevin/shared_invite/zt-2jsrl32uf-fTeeFjNyNYxqSZt5NPY3fA) - Here we talk about research, architecture, and future development. - [Discord server](https://discord.gg/ESHStjSjD4) - This is a community-run server for general discussion, questions, and feedback. ## 📈 Progress -

- - SWE-Bench Lite Score - -

@@ -130,3 +134,16 @@ Distributed under the MIT License. See [`LICENSE`](./LICENSE) for more informati [issues-url]: https://github.com/OpenDevin/OpenDevin/issues [license-shield]: https://img.shields.io/github/license/opendevin/opendevin?style=for-the-badge [license-url]: https://github.com/OpenDevin/OpenDevin/blob/main/LICENSE + +## 📚 Cite + +``` +@misc{opendevin2024, + author = {{OpenDevin Team}}, + title = {{OpenDevin: An Open Platform for AI Software Developers as Generalist Agents}}, + year = {2024}, + version = {v1.0}, + howpublished = {\url{https://github.com/OpenDevin/OpenDevin}}, + note = {Accessed: ENTER THE DATE YOU ACCESSED THE PROJECT} +} +``` diff --git a/agenthub/README.md b/agenthub/README.md index 6dd6b54c8399..1d406916d358 100644 --- a/agenthub/README.md +++ b/agenthub/README.md @@ -2,15 +2,15 @@ In this folder, there may exist multiple implementations of `Agent` that will be used by the framework. -For example, `agenthub/monologue_agent`, `agenthub/metagpt_agent`, `agenthub/codeact_agent`, etc. +For example, `agenthub/codeact_agent`, etc. Contributors from different backgrounds and interests can choose to contribute to any (or all!) of these directions. ## Constructing an Agent -The abstraction for an agent can be found [here](../opendevin/agent.py). +The abstraction for an agent can be found [here](../opendevin/controller/agent.py). Agents are run inside of a loop. At each iteration, `agent.step()` is called with a -[State](../opendevin/state.py) input, and the agent must output an [Action](../opendevin/action). +[State](../opendevin/controller/state/state.py) input, and the agent must output an [Action](../opendevin/events/action). Every agent also has a `self.llm` which it can use to interact with the LLM configured by the user. See the [LiteLLM docs for `self.llm.completion`](https://docs.litellm.ai/docs/completion). 
@@ -28,39 +28,33 @@ The `state` contains: Here is a list of available Actions, which can be returned by `agent.step()`: -- [`CmdRunAction`](../opendevin/action/bash.py) - Runs a command inside a sandboxed terminal -- [`CmdKillAction`](../opendevin/action/bash.py) - Kills a background command -- [`IPythonRunCellAction`](../opendevin/action/bash.py) - Execute a block of Python code interactively (in Jupyter notebook) and receives `CmdOutputObservation`. Requires setting up `jupyter` [plugin](../opendevin/sandbox/plugins) as a requirement. -- [`FileReadAction`](../opendevin/action/fileop.py) - Reads the content of a file -- [`FileWriteAction`](../opendevin/action/fileop.py) - Writes new content to a file -- [`BrowseURLAction`](../opendevin/action/browse.py) - Gets the content of a URL -- [`AgentRecallAction`](../opendevin/action/agent.py) - Searches memory (e.g. a vector database) -- [`AddTaskAction`](../opendevin/action/tasks.py) - Adds a subtask to the plan -- [`ModifyTaskAction`](../opendevin/action/tasks.py) - Changes the state of a subtask -- [`AgentThinkAction`](../opendevin/action/agent.py) - A no-op that allows the agent to add plaintext to the history (as well as the chat log) -- [`AgentTalkAction`](../opendevin/action/agent.py) - A no-op that allows the agent to add plaintext to the history and talk to the user. 
-- [`AgentFinishAction`](../opendevin/action/agent.py) - Stops the control loop, allowing the user/delegator agent to enter a new task -- [`AgentRejectAction`](../opendevin/action/agent.py) - Stops the control loop, allowing the user/delegator agent to enter a new task -- [`AgentFinishAction`](../opendevin/action/agent.py) - Stops the control loop, allowing the user to enter a new task -- [`MessageAction`](../opendevin/action/message.py) - Represents a message from an agent or the user +- [`CmdRunAction`](../opendevin/events/action/commands.py) - Runs a command inside a sandboxed terminal +- [`IPythonRunCellAction`](../opendevin/events/action/commands.py) - Execute a block of Python code interactively (in Jupyter notebook) and receives `CmdOutputObservation`. Requires setting up `jupyter` [plugin](../opendevin/runtime/plugins) as a requirement. +- [`FileReadAction`](../opendevin/events/action/files.py) - Reads the content of a file +- [`FileWriteAction`](../opendevin/events/action/files.py) - Writes new content to a file +- [`BrowseURLAction`](../opendevin/events/action/browse.py) - Gets the content of a URL +- [`AddTaskAction`](../opendevin/events/action/tasks.py) - Adds a subtask to the plan +- [`ModifyTaskAction`](../opendevin/events/action/tasks.py) - Changes the state of a subtask. +- [`AgentFinishAction`](../opendevin/events/action/agent.py) - Stops the control loop, allowing the user/delegator agent to enter a new task +- [`AgentRejectAction`](../opendevin/events/action/agent.py) - Stops the control loop, allowing the user/delegator agent to enter a new task +- [`AgentFinishAction`](../opendevin/events/action/agent.py) - Stops the control loop, allowing the user to enter a new task +- [`MessageAction`](../opendevin/events/action/message.py) - Represents a message from an agent or the user You can use `action.to_dict()` and `action_from_dict` to serialize and deserialize actions. ## Observations There are also several types of Observations. 
These are typically available in the step following the corresponding Action. -But they may also appear as a result of asynchronous events (e.g. a message from the user, logs from a command running -in the background). +But they may also appear as a result of asynchronous events (e.g. a message from the user). Here is a list of available Observations: -- [`CmdOutputObservation`](../opendevin/observation/run.py) -- [`BrowserOutputObservation`](../opendevin/observation/browse.py) -- [`FileReadObservation`](../opendevin/observation/files.py) -- [`FileWriteObservation`](../opendevin/observation/files.py) -- [`AgentRecallObservation`](../opendevin/observation/recall.py) -- [`ErrorObservation`](../opendevin/observation/error.py) -- [`SuccessObservation`](../opendevin/observation/success.py) +- [`CmdOutputObservation`](../opendevin/events/observation/commands.py) +- [`BrowserOutputObservation`](../opendevin/events/observation/browse.py) +- [`FileReadObservation`](../opendevin/events/observation/files.py) +- [`FileWriteObservation`](../opendevin/events/observation/files.py) +- [`ErrorObservation`](../opendevin/events/observation/error.py) +- [`SuccessObservation`](../opendevin/events/observation/success.py) You can use `observation.to_dict()` and `observation_from_dict` to serialize and deserialize observations. @@ -76,14 +70,3 @@ def step(self, state: "State") -> "Action" `step` moves the agent forward one step towards its goal. This probably means sending a prompt to the LLM, then parsing the response into an `Action`. - -### `search_memory` - -``` -def search_memory(self, query: str) -> list[str]: -``` - -`search_memory` should return a list of events that match the query. This will be used -for the `recall` action. - -You can optionally just return `[]` for this method, meaning the agent has no long-term memory. 
diff --git a/agenthub/SWE_agent/Examples.md b/agenthub/SWE_agent/Examples.md deleted file mode 100644 index 885b612f9b26..000000000000 --- a/agenthub/SWE_agent/Examples.md +++ /dev/null @@ -1,405 +0,0 @@ -# GPT-4 Examples: - -### Notes: -- The final result is not completed as I only gave it 10 steps, but shows very promising use of the tools. -- This whole run costs: $0.35 using gpt-4-turbo - - Input Tokens: 29,734 - - Completion Tokens: 1,636 - -Interesting things: -1. Using comments as an initial placeholder for the next move and then having that output in memory is a very cool behavior. -2. It makes very small iterative changes due to the prompting. -3. A lot of the logic for the game is present but not put together well -4. There are a lot of odd quirks with the code that it has written. -5. Some odd agent behavior with putting random lines of code in random places? -6. The structure of the feedback loop needs some improvement so the agent can see the changes it has made better in context. -7. Might be beneficial to add in some line numbers so the model knows what lines it is looking at. - - -``` -============== -STEP 0 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. -Usage(completion_tokens=91, prompt_tokens=2047, total_tokens=2138) - -==== RAW OUTPUT ==== -Thoughts: -To start implementing Conway's Game of Life in Python using the pygame library, I first need to set up a Python script file where I can write the code. I'll create a new Python file named `game_of_life.py`. This file will serve as the base for writing the game logic and using pygame functionalities. - -Action: -""" -write game_of_life.py # This is a placeholder for the game of life implementation using pygame. 
-""" - -==== END RAW ==== - - -ACTION: -FileWriteAction(path='game_of_life.py', content='# This is a placeholder for the game of life implementation using pygame.', start=0, end=-1, thoughts="Thoughts:\nTo start implementing Conway's Game of Life in Python using the pygame library, I first need to set up a Python script file where I can write the code. I'll create a new Python file named `game_of_life.py`. This file will serve as the base for writing the game logic and using pygame functionalities.\n\nAction:", action=) - -OBSERVATION: -WRITE OPERATION: -You have written to "game_of_life.py" on these lines: 0:-1. - - -============== -STEP 1 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. -Usage(completion_tokens=101, prompt_tokens=2351, total_tokens=2452) - -==== RAW OUTPUT ==== -Thoughts: -The placeholder for the game of life implementation using pygame has been set up in the file `game_of_life.py`. The next step is to start implementing the basic structure of the game. This includes importing necessary libraries, initializing the pygame module, setting up the display window, and defining the main game loop. I will begin by importing pygame and initializing it, which are the foundational steps for any pygame application. - -Action: -""" -edit 0 0 import pygame - -pygame.init() -""" - -==== END RAW ==== - - -ACTION: -FileWriteAction(path='game_of_life.py', content='import pygame\n\npygame.init()', start=0, end=0, thoughts='Thoughts:\nThe placeholder for the game of life implementation using pygame has been set up in the file `game_of_life.py`. The next step is to start implementing the basic structure of the game. This includes importing necessary libraries, initializing the pygame module, setting up the display window, and defining the main game loop. 
I will begin by importing pygame and initializing it, which are the foundational steps for any pygame application.\n\nAction:', action=) - -OBSERVATION: -WRITE OPERATION: -You have written to "game_of_life.py" on these lines: 0:0. - - -============== -STEP 2 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. -Usage(completion_tokens=123, prompt_tokens=2552, total_tokens=2675) - -==== RAW OUTPUT ==== -Thoughts: -Previously, I initialized pygame in the `game_of_life.py` file. The next step is to set up the display window for the game. This involves defining the size of the window and setting the title. This is crucial as it provides the visual interface for the game. I will also add a comment to indicate that this is the setup for the display window. - -Action: -""" -edit 2 2 -# Setup the display window -screen_size = (800, 600) -screen = pygame.display.set_mode(screen_size) -pygame.display.set_caption("Conway's Game of Life") -""" -==== END RAW ==== - - -ACTION: -FileWriteAction(path='game_of_life.py', content='# Setup the display window\nscreen_size = (800, 600)\nscreen = pygame.display.set_mode(screen_size)\npygame.display.set_caption("Conway\'s Game of Life")', start=2, end=2, thoughts='Thoughts:\nPreviously, I initialized pygame in the `game_of_life.py` file. The next step is to set up the display window for the game. This involves defining the size of the window and setting the title. This is crucial as it provides the visual interface for the game. I will also add a comment to indicate that this is the setup for the display window.\n\nAction:', action=) - -OBSERVATION: -WRITE OPERATION: -You have written to "game_of_life.py" on these lines: 2:2. - - -============== -STEP 3 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. 
-Usage(completion_tokens=144, prompt_tokens=2777, total_tokens=2921) - -==== RAW OUTPUT ==== -Thoughts: -So far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame and setting up the display window. The next step is to define the main game loop where the game's logic will be continuously updated and rendered. This loop will handle events, update the game state, and redraw the screen. I will start by defining the main loop structure and a basic event handling mechanism to allow for quitting the game when the user presses the close button. - -Action: -""" -edit 5 5 # Main game loop -running = True -while running: - for event in pygame.event.get(): - if event.type == pygame.QUIT: - running = False -""" - -==== END RAW ==== - - -ACTION: -FileWriteAction(path='game_of_life.py', content='# Main game loop\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False', start=5, end=5, thoughts="Thoughts:\nSo far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame and setting up the display window. The next step is to define the main game loop where the game's logic will be continuously updated and rendered. This loop will handle events, update the game state, and redraw the screen. I will start by defining the main loop structure and a basic event handling mechanism to allow for quitting the game when the user presses the close button.\n\nAction:", action=) - -OBSERVATION: -WRITE OPERATION: -You have written to "game_of_life.py" on these lines: 5:5. - - -============== -STEP 4 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. 
-Usage(completion_tokens=222, prompt_tokens=3022, total_tokens=3244) - -==== RAW OUTPUT ==== -Thoughts: -So far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, and defining the main game loop. The next step is to implement the logic for Conway's Game of Life. This involves creating a grid to represent the game state, initializing it with a random pattern, and defining the rules for cell survival, birth, and death. I will start by defining the grid size and initializing the grid with random states. - -Action: -""" -edit 7 7 # Define grid size and initialize grid -grid_size = (50, 50) # Grid size as a tuple (rows, columns) -grid = [[0 for x in range(grid_size[1])] for y in range(grid_size[0])] # Initialize grid with zeros -import random -for y in range(grid_size[0]): - for x in range(grid_size[1]): - grid[y][x] = random.randint(0, 1) # Randomly assign cells as alive (1) or dead (0) -""" -==== END RAW ==== - - -ACTION: -FileWriteAction(path='game_of_life.py', content='# Define grid size and initialize grid\ngrid_size = (50, 50) # Grid size as a tuple (rows, columns)\ngrid = [[0 for x in range(grid_size[1])] for y in range(grid_size[0])] # Initialize grid with zeros\nimport random\nfor y in range(grid_size[0]):\n for x in range(grid_size[1]):\n grid[y][x] = random.randint(0, 1) # Randomly assign cells as alive (1) or dead (0)', start=7, end=7, thoughts="Thoughts:\nSo far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, and defining the main game loop. The next step is to implement the logic for Conway's Game of Life. This involves creating a grid to represent the game state, initializing it with a random pattern, and defining the rules for cell survival, birth, and death. 
I will start by defining the grid size and initializing the grid with random states.\n\nAction:", action=) - -OBSERVATION: -WRITE OPERATION: -You have written to "game_of_life.py" on these lines: 7:7. - - -============== -STEP 5 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. -Usage(completion_tokens=234, prompt_tokens=3158, total_tokens=3392) - -==== RAW OUTPUT ==== -Thoughts: -So far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, defining the main game loop, and initializing the grid with random states. The next step is to implement the rules for Conway's Game of Life. This involves checking each cell's neighbors to determine its next state based on the number of alive neighbors. I will start by defining a function to count the alive neighbors for a given cell. - -Action: -""" -edit 9 9 def count_alive_neighbors(grid, x, y): - directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)] - count = 0 - for dx, dy in directions: - nx, ny = x + dx, y + dy - if 0 <= nx < grid_size[1] and 0 <= ny < grid_size[0]: - count += grid[ny][nx] - return count -""" - -==== END RAW ==== - - -ACTION: -FileWriteAction(path='game_of_life.py', content='def count_alive_neighbors(grid, x, y):\n directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n count = 0\n for dx, dy in directions:\n nx, ny = x + dx, y + dy\n if 0 <= nx < grid_size[1] and 0 <= ny < grid_size[0]:\n count += grid[ny][nx]\n return count', start=9, end=9, thoughts="Thoughts:\nSo far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, defining the main game loop, and initializing the grid with random states. The next step is to implement the rules for Conway's Game of Life. 
This involves checking each cell's neighbors to determine its next state based on the number of alive neighbors. I will start by defining a function to count the alive neighbors for a given cell.\n\nAction:", action=) - -OBSERVATION: -WRITE OPERATION: -You have written to "game_of_life.py" on these lines: 9:9. - - -============== -STEP 6 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. -Usage(completion_tokens=257, prompt_tokens=3290, total_tokens=3547) - -==== RAW OUTPUT ==== -Thoughts: -So far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, defining the main game loop, initializing the grid with random states, and creating a function to count alive neighbors. The next step is to implement the rules for Conway's Game of Life. This involves updating the grid state based on the number of alive neighbors each cell has. I will define a function `update_grid` that applies the rules of the game to update the grid for the next generation. 
- -Action: -""" -write game_of_life.py def update_grid(grid): - new_grid = [[0 for x in range(grid_size[1])] for y in range(grid_size[0])] - for y in range(grid_size[0]): - for x in range(grid_size[1]): - alive_neighbors = count_alive_neighbors(grid, x, y) - if grid[y][x] == 1 and (alive_neighbors == 2 or alive_neighbors == 3): - new_grid[y][x] = 1 - elif grid[y][x] == 0 and alive_neighbors == 3: - new_grid[y][x] = 1 - return new_grid - 11 - 11 -""" -==== END RAW ==== - - -ACTION: -FileWriteAction(path='game_of_life.py', content='def update_grid(grid):\n new_grid = [[0 for x in range(grid_size[1])] for y in range(grid_size[0])]\n for y in range(grid_size[0]):\n for x in range(grid_size[1]):\n alive_neighbors = count_alive_neighbors(grid, x, y)\n if grid[y][x] == 1 and (alive_neighbors == 2 or alive_neighbors == 3):\n new_grid[y][x] = 1\n elif grid[y][x] == 0 and alive_neighbors == 3:\n new_grid[y][x] = 1\n return new_grid', start=11, end=11, thoughts="Thoughts:\nSo far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, defining the main game loop, initializing the grid with random states, and creating a function to count alive neighbors. The next step is to implement the rules for Conway's Game of Life. This involves updating the grid state based on the number of alive neighbors each cell has. I will define a function `update_grid` that applies the rules of the game to update the grid for the next generation.\n\nAction:", action=) - -OBSERVATION: -WRITE OPERATION: -You have written to "game_of_life.py" on these lines: 11:11. - - -============== -STEP 7 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. 
-Usage(completion_tokens=124, prompt_tokens=3416, total_tokens=3540) - -==== RAW OUTPUT ==== -Thoughts: -So far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, defining the main game loop, initializing the grid with random states, and creating functions to count alive neighbors and update the grid. The next step is to integrate the `update_grid` function into the main game loop to continuously update the state of the grid based on the game rules. This will allow the game to evolve over time, showing different generations of the cell population. - -Action: -""" -edit 6 6 - grid = update_grid(grid) -""" -==== END RAW ==== - - -ACTION: -FileWriteAction(path='game_of_life.py', content='grid = update_grid(grid)', start=6, end=6, thoughts="Thoughts:\nSo far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, defining the main game loop, initializing the grid with random states, and creating functions to count alive neighbors and update the grid. The next step is to integrate the `update_grid` function into the main game loop to continuously update the state of the grid based on the game rules. This will allow the game to evolve over time, showing different generations of the cell population.\n\nAction:", action=) - -OBSERVATION: -WRITE OPERATION: -You have written to "game_of_life.py" on these lines: 6:6. - - -============== -STEP 8 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. 
-Usage(completion_tokens=166, prompt_tokens=3393, total_tokens=3559) - -==== RAW OUTPUT ==== -Thoughts: -So far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, defining the main game loop, initializing the grid with random states, and creating functions to count alive neighbors and update the grid. The next step is to integrate the `update_grid` function into the main game loop to continuously update the state of the grid based on the game rules. This will allow the game to evolve over time, showing different generations of the cell population. I need to ensure that the `update_grid` function is called within the main game loop and that the display is updated accordingly. I will start by checking the current content of the main game loop to see where to integrate the `update_grid` function. - -Action: -""" -goto 6 -""" -==== END RAW ==== - - -ACTION: -FileReadAction(path='game_of_life.py', start=6, end=106, thoughts="Thoughts:\nSo far, I have set up the basic structure for Conway's Game of Life using pygame, including initializing pygame, setting up the display window, defining the main game loop, initializing the grid with random states, and creating functions to count alive neighbors and update the grid. The next step is to integrate the `update_grid` function into the main game loop to continuously update the state of the grid based on the game rules. This will allow the game to evolve over time, showing different generations of the cell population. I need to ensure that the `update_grid` function is called within the main game loop and that the display is updated accordingly. 
I will start by checking the current content of the main game loop to see where to integrate the `update_grid` function.\n\nAction:", action=) - -OBSERVATION: -grid = update_grid(grid) -running = True -# Define grid size and initialize grid -grid_size = (50, 50) # Grid size as a tuple (rows, columns) -def count_alive_neighbors(grid, x, y): - directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)] -def update_grid(grid): - new_grid = [[0 for x in range(grid_size[1])] for y in range(grid_size[0])] - for y in range(grid_size[0]): - for x in range(grid_size[1]): - alive_neighbors = count_alive_neighbors(grid, x, y) - if grid[y][x] == 1 and (alive_neighbors == 2 or alive_neighbors == 3): - new_grid[y][x] = 1 - elif grid[y][x] == 0 and alive_neighbors == 3: - new_grid[y][x] = 1 - return new_grid - count = 0 - for dx, dy in directions: - nx, ny = x + dx, y + dy - if 0 <= nx < grid_size[1] and 0 <= ny < grid_size[0]: - count += grid[ny][nx] - return count -grid = [[0 for x in range(grid_size[1])] for y in range(grid_size[0])] # Initialize grid with zeros -import random -for y in range(grid_size[0]): - for x in range(grid_size[1]): - grid[y][x] = random.randint(0, 1) # Randomly assign cells as alive (1) or dead (0) -while running: - for event in pygame.event.get(): - if event.type == pygame.QUIT: - running = False -pygame.display.set_caption("Conway's Game of Life") -pygame.init() - - - -============== -STEP 9 - -PLAN: -Please make conway's game of life in python using the pygame library. Be sure to use the tools in your workspace to help you out. 
-Usage(completion_tokens=174, prompt_tokens=3728, total_tokens=3902) - -==== RAW OUTPUT ==== -Thoughts: -From the previous actions, it seems that the basic structure for Conway's Game of Life using pygame has been set up, including initializing pygame, setting up the display window, defining the main game loop, initializing the grid with random states, and creating functions to count alive neighbors and update the grid. The next step is to ensure that the game loop properly updates the grid and refreshes the display to show the new state of the grid in each iteration. This involves calling `pygame.display.update()` to update the entire screen and using `pygame.time.Clock()` to control the frame rate. I will start by adding these necessary updates to the game loop. - -Action: -""" -edit 95 95 - pygame.display.update() - clock = pygame.time.Clock() - clock.tick(10) # Control the frame rate (10 frames per second) -""" - -==== END RAW ==== - - -ACTION: -FileWriteAction(path='game_of_life.py', content='pygame.display.update()\n clock = pygame.time.Clock()\n clock.tick(10) # Control the frame rate (10 frames per second)', start=95, end=95, thoughts="Thoughts:\nFrom the previous actions, it seems that the basic structure for Conway's Game of Life using pygame has been set up, including initializing pygame, setting up the display window, defining the main game loop, initializing the grid with random states, and creating functions to count alive neighbors and update the grid. The next step is to ensure that the game loop properly updates the grid and refreshes the display to show the new state of the grid in each iteration. This involves calling `pygame.display.update()` to update the entire screen and using `pygame.time.Clock()` to control the frame rate. I will start by adding these necessary updates to the game loop.\n\nAction:", action=) - -OBSERVATION: -WRITE OPERATION: -You have written to "game_of_life.py" on these lines: 95:95. 
-16:43:55 - opendevin:INFO: agent_controller.py:125 - Exited before finishing the task. - - -``` - -## Result: - -``` -import pygame - -# Setup the display window -screen_size = (800, 600) -screen = pygame.display.set_mode(screen_size) -# Main game loop -grid = update_grid(grid) -running = True -# Define grid size and initialize grid -grid_size = (50, 50) # Grid size as a tuple (rows, columns) -def count_alive_neighbors(grid, x, y): - directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)] -def update_grid(grid): - new_grid = [[0 for x in range(grid_size[1])] for y in range(grid_size[0])] - for y in range(grid_size[0]): - for x in range(grid_size[1]): - alive_neighbors = count_alive_neighbors(grid, x, y) - if grid[y][x] == 1 and (alive_neighbors == 2 or alive_neighbors == 3): - new_grid[y][x] = 1 - elif grid[y][x] == 0 and alive_neighbors == 3: - new_grid[y][x] = 1 - return new_grid - count = 0 - for dx, dy in directions: - nx, ny = x + dx, y + dy - if 0 <= nx < grid_size[1] and 0 <= ny < grid_size[0]: - count += grid[ny][nx] - return count -grid = [[0 for x in range(grid_size[1])] for y in range(grid_size[0])] # Initialize grid with zeros -import random -for y in range(grid_size[0]): - for x in range(grid_size[1]): - grid[y][x] = random.randint(0, 1) # Randomly assign cells as alive (1) or dead (0) -while running: - for event in pygame.event.get(): - if event.type == pygame.QUIT: - running = False -pygame.display.set_caption("Conway's Game of Life") -pygame.init() -# This is a placeholder for the game of life implementation using pygame.pygame.display.update() - clock = pygame.time.Clock() - clock.tick(10) # Control the frame rate (10 frames per second) - -``` diff --git a/agenthub/SWE_agent/__init__.py b/agenthub/SWE_agent/__init__.py deleted file mode 100644 index 69a768808278..000000000000 --- a/agenthub/SWE_agent/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from opendevin.controller.agent import Agent - -from .agent import SWEAgent - 
-Agent.register('SWEAgent', SWEAgent) diff --git a/agenthub/SWE_agent/agent.py b/agenthub/SWE_agent/agent.py deleted file mode 100644 index b299363960b4..000000000000 --- a/agenthub/SWE_agent/agent.py +++ /dev/null @@ -1,115 +0,0 @@ -from opendevin.controller.agent import Agent -from opendevin.controller.state.state import State -from opendevin.events.action import ( - Action, - FileReadAction, - FileWriteAction, - MessageAction, -) -from opendevin.events.serialization.event import event_to_memory -from opendevin.llm.llm import LLM - -from .parser import parse_command -from .prompts import ( - CONTEXT_PROMPT, - MEMORY_FORMAT, - NO_ACTION, - STEP_PROMPT, - SYSTEM_MESSAGE, -) - - -class SWEAgent(Agent): - VERSION = '1.0' - DEPRECATED = True - """ - An attempt to recreate swe_agent with output parsing, prompting style, and Application Computer Interface (ACI). - - SWE-agent includes ACI functions like 'goto', 'search_for', 'edit', 'scroll', 'run' - """ - - def __init__(self, llm: LLM): - super().__init__(llm) - self.memory_window = 4 - self.max_retries = 2 - self.cur_file: str = '' - self.cur_line: int = 0 - - def _think_act(self, messages: list[dict]) -> tuple[Action, str]: - resp = self.llm.do_completion( - messages=messages, - temperature=0.05, - ) - action_resp = resp['choices'][0]['message']['content'] - print(f"\033[1m\033[91m{resp['usage']}\033[0m") - print( - '\n==== RAW OUTPUT ====', - f'\033[96m{action_resp}\033[0m', - '==== END RAW ====\n', - sep='\n', - ) - return parse_command(action_resp, self.cur_file, self.cur_line) - - def _update(self, action: Action) -> None: - if isinstance(action, (FileReadAction, FileWriteAction)): - self.cur_file = action.path - self.cur_line = action.start - - def step(self, state: State) -> Action: - """ - SWE-Agent step: - 1. Get context - past actions, custom commands, current step - 2. Perform think-act - prompt model for action and reasoning - 3. 
Catch errors - ensure model takes action (5 attempts max) - """ - # retrieve short term memories from state.history, up to memory_window - memory_window = min(self.memory_window, len(state.history)) - running_memory: list[str] = [] - for prev_action, obs in state.history[-memory_window:]: - running_memory.append( - MEMORY_FORMAT(event_to_memory(prev_action), event_to_memory(obs)) - ) - - goal = state.get_current_user_intent() - - # always in the prompt if they exist: file and line - prompt = STEP_PROMPT(goal, self.cur_file, self.cur_line) - - # prepare messages - msgs = [ - {'content': SYSTEM_MESSAGE, 'role': 'system'}, - {'content': prompt, 'role': 'user'}, - ] - - # insert memories - if len(running_memory) > 0: - context = CONTEXT_PROMPT(running_memory, self.memory_window) - msgs.insert(1, {'content': context, 'role': 'user'}) - # clrs = [''] * (len(msgs)-2) + ['\033[0;36m', '\033[0;35m'] - # print('\n\n'.join([c+m['content']+'\033[0m' for c, m in zip(clrs, msgs)])) - - # send it over - action, thought = self._think_act(messages=msgs) - - # be robust with malformed responses - start_msg_len = len(msgs) - while not action and len(msgs) < self.max_retries + start_msg_len: - error = NO_ACTION(thought) - error_msg = {'content': error, 'role': 'user'} - msgs.append(error_msg) - action, thought = self._think_act(messages=msgs) - - if not action: - action = MessageAction(thought) - - self._update(action) - self.latest_action = action - return action - - def search_memory(self, query: str) -> list[str]: - # return [item for item in self.running_memory if query in item] - raise NotImplementedError('Search_memory not implemented currently') - - def reset(self) -> None: - """Used to reset the agent""" - super().reset() diff --git a/agenthub/SWE_agent/parser.py b/agenthub/SWE_agent/parser.py deleted file mode 100644 index cc01ad5f932c..000000000000 --- a/agenthub/SWE_agent/parser.py +++ /dev/null @@ -1,167 +0,0 @@ -import re - -from opendevin.events.action import ( - Action, 
- AgentFinishAction, - BrowseURLAction, - CmdRunAction, - FileReadAction, - FileWriteAction, - MessageAction, -) - -from .prompts import COMMAND_USAGE, CUSTOM_DOCS - -# commands: exit, read, write, browse, kill, search_file, search_dir - -no_open_file_error = MessageAction( - 'You are not currently in a file. You can use the read command to open a file and then use goto to navigate through it.' -) - - -def invalid_error(cmd, docs): - return f"""ERROR: -Invalid command structure for -``` -{cmd} -``` -You may have caused this error by having multiple commands within your command block. -If so, try again by running only one of the commands: - -Try again using this format: -{COMMAND_USAGE[docs]} -""" - - -def get_action_from_string( - command_string: str, path: str, line: int, thoughts: str = '' -) -> Action | None: - """ - Parses the command string to find which command the agent wants to run - Converts the command into a proper Action and returns - """ - vars = command_string.split(' ') - cmd = vars[0] - args = [] if len(vars) == 1 else vars[1:] - - if 'exit' == cmd: - return AgentFinishAction() - - elif 'think' == cmd: - return MessageAction(' '.join(args)) - - elif 'scroll_up' == cmd: - if not path: - return no_open_file_error - return FileReadAction(path, line + 100, line + 200, thoughts) - - elif 'scroll_down' == cmd: - if not path: - return no_open_file_error - return FileReadAction(path, line - 100, line, thoughts) - - elif 'goto' == cmd: - if not path: - return no_open_file_error - rex = r'^goto\s+(\d+)$' - valid = re.match(rex, command_string) - if valid: - start = int(valid.group(1)) - end = start + 100 - return FileReadAction(path, start, end, thoughts) - else: - return MessageAction(invalid_error(command_string, 'goto')) - - elif 'edit' == cmd: - if not path: - return no_open_file_error - rex = r'^edit\s+(\d+)\s+(-?\d+)\s+(\S.*)$' - valid = re.match(rex, command_string, re.DOTALL) - if valid: - start = int(valid.group(1)) - end = int(valid.group(2)) - 
change = valid.group(3) - if '"' == change[-1] and '"' == change[0]: - change = change[1:-1] - return FileWriteAction(path, change, start, end, thoughts) - else: - return MessageAction(invalid_error(command_string, 'edit')) - - elif 'read' == cmd: - rex = r'^read\s+(\S+)(?:\s+(\d+))?(?:\s+(-?\d+))?$' - valid = re.match(rex, command_string, re.DOTALL) - if valid: - file = valid.group(1) - start_str = valid.group(2) - end_str = valid.group(3) - - start = 0 if not start_str else int(start_str) - end = -1 if not end_str else int(end_str) - - return FileReadAction(file, start, end, thoughts) - else: - return MessageAction(invalid_error(command_string, 'read')) - - elif 'write' == cmd: - rex = r'^write\s+(\S+)\s+(.*?)\s*(\d+)?\s*(-?\d+)?$' - valid = re.match(rex, command_string, re.DOTALL) - - if valid: - file = valid.group(1) - content = valid.group(2) - start_str = valid.group(3) - end_str = valid.group(4) - - start = 0 if not start_str else int(start_str) - end = -1 if not end_str else int(end_str) - - if '"' == content[-1] and '"' == content[0]: - content = content[1:-1] - - return FileWriteAction(file, content, start, end, thoughts) - else: - return MessageAction(invalid_error(command_string, 'write')) - - elif 'browse' == cmd: - return BrowseURLAction(args[0].strip()) - - elif cmd in ['search_file', 'search_dir', 'find_file']: - rex = r'^(search_file|search_dir|find_file)\s+(\S+)(?:\s+(\S+))?$' - valid = re.match(rex, command_string, re.DOTALL) - if valid: - return CmdRunAction(command_string) - else: - return MessageAction( - f'Invalid command structure for\n ```\n{command_string}\n```.\nTry again using this format:\n{CUSTOM_DOCS}' - ) - else: - # check bash command - obs = str(CmdRunAction(f'type {cmd}')) - if obs.split(':')[-1].strip() == 'not found': - # echo not found error for llm - return MessageAction(content=obs) - else: - # run valid command - return CmdRunAction(command_string) - - -def parse_command(input_str: str, path: str, line: int): - """ - Parses 
a given string and separates the command (enclosed in triple backticks) from any accompanying text. - - Args: - input_str (str): The input string to be parsed. - - Returns: - tuple: A tuple containing the command and the accompanying text (if any). - """ - input_str = input_str.strip() - if '```' in input_str: - parts = input_str.split('```') - command_str = parts[1].strip() - ind = 2 if len(parts) > 2 else 1 - accompanying_text = ''.join(parts[:-ind]).strip() - action = get_action_from_string(command_str, path, line, accompanying_text) - if action: - return action, accompanying_text - return None, input_str # used for retry diff --git a/agenthub/SWE_agent/prompts.py b/agenthub/SWE_agent/prompts.py deleted file mode 100644 index 6e082ebb04ff..000000000000 --- a/agenthub/SWE_agent/prompts.py +++ /dev/null @@ -1,208 +0,0 @@ -DEFAULT_COMMANDS_DICT = { - 'exit': 'Executed when task is complete', - 'read [] []': "Shows a given file's contents starting from up to . Default: start_line = 0, end_line = -1. By default the whole file will be read.", - 'write [] []': 'Modifies a by replacing the current lines between and with . Default start_line = 0 and end_line = -1. Calling this with no line args will replace the whole file.', - 'browse ': 'Returns the text version of any url, this can be useful to look up documentation or finding issues on github', - 'scroll_up': 'Takes no arguments. This will scroll up and show you the 100 lines above your current lines', - 'scroll_down': 'Takes no arguments. This will scroll down and show you the 100 lines below your current lines', - 'edit ': 'This will modify lines in the currently open file. use start_line and end_line to designate which lines to change and then write the multiline changes. Set end_line to -1 to denote the end of the file', - 'goto ': 'This will take you directly to a line and show you the 100 lines below it.', - ' ': 'You can use any bash command you need (cd, ls, rm, grep, dir, mv, wget, git, zip, etc.) 
with their arguments included', - 'pip install ': 'You can use this to import python packages. Make sure you include the correct package name when using this command.', - 'ls': 'Use the ls command to view all the files in your current directory, this is a good starting point.', - 'NOT ALLOWED': 'You cannot use interactive commands like python or node', -} - -COMMAND_USAGE = { - 'exit': 'Usage:\n```\nexit\n```\nExecuted when task is complete', - 'read': "Args:\n [] []\nUsage:\n```\nread file.py\n```\nor\n```\nread example.py \n```\nShows a given file's contents starting from up to . Default: start_line = 0, end_line = -1. by default the whole file will be read.", - 'write': 'Args:\n [] []\nUsage:\n```\nwrite "def main():\n print("This is line one")" 0 2\n```\nModifies a by replacing the current lines between and with . Default start_line = 0 and end_line = -1. Calling this with no line args will replace the whole file.', - 'edit': 'Args:\n \nUsage:\n```\nedit 0 1 import pandas as pd\n```\nThis will modify the current file you are in with the changes you make between the line numbers you designate', - 'goto': 'Args:\n\nUsage:\n```\ngoto \n```\nThis will show you the 100 lines below and including the line you specify within your current file.', - 'scroll_up': 'Usage:\n```\nscroll_up\n```\nThis will return the 100 lines above where you are currently at', - 'scroll_down': 'Usage:\n```\nscroll_down\n```\nThis will return the 100 line below where you are currently at', - 'browse': 'Args:\n\nUsage:\n```\nbrowse https://github.com/OpenDevin/OpenDevin\n```\nThis will fetch the Text elements from the given url and show them to you.', -} - -DEFAULT_COMMANDS = '\n'.join([k + ' - ' + v for k, v in DEFAULT_COMMANDS_DICT.items()]) - -# from opendevin.parse_commands import parse_command_file -# USE parse_command_file(filepath) to get the custom commands -CUSTOM_DOCS = None - -CUSTOM_COMMANDS = f"""Custom bash commands: -{CUSTOM_DOCS} -""" - -DOCUMENTATION = f"""DOCUMENTATION: -It 
is recommend that you use the commands provided for interacting with files and your directory because they have been specially built for you. -They will make it much easier for you to look at files and make changes. Using these commands will help you be better at your task. -You can open an file by using either the read or write operations. -- If a file already exists you should read it before making any changes. Use the `edit` command to make changes once you have read it. -- If you are creating a new file use the write command. Use the `edit` command to make changes once you have created the new file. - -Commands: -{DEFAULT_COMMANDS} -{CUSTOM_COMMANDS} - -The following commands require an open file to be used: edit, scroll_up, scroll_down, goto -To modify the current file use 'edit'. To move through the current file use 'goto' or 'scroll_up'/'scroll_down' -when using write and edit do not surround the code with any "" just write the code. -""" - -GENERAL_GUIDELINES = """INSTRUCTIONS: -Now, you're going to solve this issue on your own. You can use any bash commands or custom commands you wish to complete your task. Edit all the files you need to and run any checks or tests that you want. -Remember, YOU CAN ONLY ENTER ONE COMMAND AT A TIME. You should always wait for feedback after every command. -When you're satisfied with all of the changes you've made, you can indicate that you are done by running the exit command. -Note however that you cannot use any interactive session commands (e.g. python, vim, node) in this environment, but you can write scripts and run them. E.g. you can write a python script and then run it with `python .py`. - -NOTE ABOUT THE write COMMAND: Indentation really matters! When editing a file, make sure to insert appropriate indentation before each line! - -IMPORTANT TIPS: -1. Reproduce the bug: Always start by trying to replicate the bug that the issue discusses. 
If the issue includes code for reproducing the bug, we recommend that you re-implement that in your environment and run it to ensure you can reproduce the bug. Then, start trying to fix it. When you think you've fixed the bug, re-run the bug reproduction script to make sure that the issue has indeed been resolved. - If the bug reproduction script does not print anything when it successfully runs, we recommend adding a print("Script completed successfully, no errors.") command at the end of the file, so that you can be sure the script ran fine all the way through. -2. Try different commands: If you run a command and it doesn't work, try running a different command. A command that did not work once will not work the second time unless you modify it. -3. Navigate large files: If you open a file and need to get to an area around a specific line that is not in the first 100 lines, say line 583, you would use the 'read' command like this: 'read 583'. This is a much faster way to read through the file. -4. Handle input files: If the bug reproduction script requires inputting/reading a specific file, such as 'buggy-input.png', and you'd like to understand how to input that file, conduct a search in the existing repository code to see whether someone else has already done that. Do this by running the command: 'search_dir "buggy-input.png"'. If that doesn't work, use the Linux 'find' command. -5. Understand your context: Always make sure to look at the currently open file and the current working directory. The currently open file might be in a different directory than the working directory. -6. Verify your edits: When editing files, it is easy to accidentally specify a wrong line number or to write code with incorrect indentation. Always check the code after you issue an edit to make sure that it reflects what you wanted to accomplish. If it didn't, issue another command to fix it. -7. 
Thoroughly test your solution: After making any changes to fix a bug, be sure to thoroughly test your solution to ensure the bug has been resolved. Re-run the bug reproduction script and verify that the issue has been addressed. -""" - -RESPONSE_FORMAT = """RESPONSE FORMAT: -This is the format of the response you will make in order to solve the current issue. -You will be given multiple iterations to complete this task so break it into steps and solve them one by one. - -Your output must contain the following: -- First, thoughts about what your next action should be and plan it out. - - You will have a memory of your thoughts so you can use this to remember things for the next step. - - Use your thoughts to think about what you are currently doing, what you have done on prior steps and how that relates to solving the problem. -- Second, create a piece of code that will execute your next action based on the thoughts you have. - - Remember that you can only have one action for each thought, do not include multiple actions. - -Your code MUST be surrounded in triple back ticks EXACTLY like this: -``` - -``` - -Notes: -- Adhere to the format so that the program loop continues smoothly, it is very important to only give one command per output. -- DO NOT give more than one command within the triple backticks. This will just throw an error and nothing will happen as a result. -- Do not give multiple code blocks, if you do only the second one will be captured and run, this might give an error if the first one was necessary. -- To execute multiple commands you should write them down in your thoughts section so you can remember it on the next step and execute them then. -- The only commands you are not capable of executing are interactive commands like `python` or `node` by themselves. 
-- If you think that you have completed the task that has been given to you based on your previous actions and outputs then use ``` exit ``` as the command to let the system know that you are done. -- DO NOT make any copies of your previous memories, those will be provided to you at each step, making copies just wastes time and energy. Think smarter not harder. -- The write and edit commands requires proper indentation in the content section ex. `write hw.py def hello():\n print(\'Hello World\')` this is how you would have to format your write command. - - The white spaces matter as the code changes will be added to the code so they must have proper syntax. - -This is a template using the format described above -Items in <> are suggestions for you, fill them out based on the context of the problem you are solving. - -[ FORMAT ] -Thoughts: - - - -Action: -``` - -``` -[ END FORMAT ] - -Do not provide anything extra just your thought and action. -""" - -SYSTEM_MESSAGE = f"""SYSTEM INFO: -You are an autonomous coding agent, here to provide solutions for coding issues. -You have been designed to assist with a wide range of programming tasks, from code editing and debugging to testing and deployment. -You have access to a variety of tools and commands that you can use to help you solve problems efficiently. - -{GENERAL_GUIDELINES} - -{DOCUMENTATION} -""".strip() - - -def NO_ACTION(latest): - return f""" -You did not include any action to take in your most recent output: - -===== Output ====== -{latest} -==== End Output === - -Remember these are the custom commands you can use: -{DOCUMENTATION} - -Lets try that again, it is very important that you adhere to the output format -This time, be sure to use the exact format below, replacing anything in <> with the appropriate value(s): -{RESPONSE_FORMAT} - -It is crucial you use the format provided as the output will be parsed automatically. 
-""" - - -def file_info(file: str, line: int): - if file: - return f"""CURRENT WORKSPACE: - Open File: {file} on line {line} - You can use these commands with the current file: - Navigation: `scroll_up`, `scroll_down`, and `goto ` - Modification: `edit ` - """ - - -def STEP_PROMPT(task, file, line_num): - return f""" -{RESPONSE_FORMAT} -You are currently trying to complete this task: -{task} - -{file_info(file, line_num)} - -Keep all of the guidelines above in mind when you are thinking and making code. -Please come up with a thought and action based on your current task and latest steps. -Make sure that you do not repeat the same actions, there will not be any changes in result if you do not changes anything. -Be very strict about the formatting that you use and make sure you follow the guidelines. -NEVER output multiple commands. ONLY take ONE STEP at a time. -When you have completed your task run the "exit" command. -Begin with your thought about the next step and then come up with an action to perform your thought. 
-""".strip() - - -def unpack_dict(data: dict, restrict: list[str] | None = None): - lines = [] - restrict = [] if restrict is None else restrict - for key, value in data.items(): - if key in restrict: - continue - elif isinstance(value, dict): - nested_str = unpack_dict(value, restrict).replace('\n', '\n ') - val = f'{key}:' + '\n ' + f'{nested_str}' - lines.append(val) - else: - lines.append(f'{key}: {value}') - return '\n'.join(lines) - - -def MEMORY_FORMAT(act, obs): - return f""" -Previous Action: -{unpack_dict(act, ["content"])} - -Output from Action: -{unpack_dict(obs)} -""".strip() - - -def CONTEXT_PROMPT(memory, window): - res = f'These are your past {window} actions:\n' - window_size = window if len(memory) > window else len(memory) - cur_mems = memory[-window_size:] - res += '===== Previous Actions =====\n' - for idx, mem in enumerate(cur_mems): - res += f'\nMemory {idx}:\n{mem}\n' - res += '======= End Actions =======\n' - res += 'Use these memories to provide additional context to the problem you are solving.\nRemember that you have already completed these steps so you do not need to perform them again.' - return res diff --git a/agenthub/__init__.py b/agenthub/__init__.py index 5f3355dc9c48..7e68f50d3e80 100644 --- a/agenthub/__init__.py +++ b/agenthub/__init__.py @@ -9,7 +9,6 @@ from . 
import ( # noqa: E402 - SWE_agent, browsing_agent, codeact_agent, codeact_swe_agent, @@ -24,7 +23,6 @@ 'codeact_agent', 'codeact_swe_agent', 'planner_agent', - 'SWE_agent', 'delegator_agent', 'dummy_agent', 'browsing_agent', diff --git a/agenthub/browsing_agent/browsing_agent.py b/agenthub/browsing_agent/browsing_agent.py index 2bce4d804f0c..91a59cd282b7 100644 --- a/agenthub/browsing_agent/browsing_agent.py +++ b/agenthub/browsing_agent/browsing_agent.py @@ -1,8 +1,9 @@ -import ast +import os from browsergym.core.action.highlevel import HighLevelActionSet from browsergym.utils.obs import flatten_axtree_to_str +from agenthub.browsing_agent.response_parser import BrowsingResponseParser from opendevin.controller.agent import Agent from opendevin.controller.state.state import State from opendevin.core.logger import opendevin_logger as logger @@ -12,27 +13,76 @@ BrowseInteractiveAction, MessageAction, ) +from opendevin.events.event import EventSource from opendevin.events.observation import BrowserOutputObservation +from opendevin.events.observation.observation import Observation from opendevin.llm.llm import LLM from opendevin.runtime.plugins import ( PluginRequirement, ) +from opendevin.runtime.tools import RuntimeTool +USE_NAV = ( + os.environ.get('USE_NAV', 'true') == 'true' +) # only disable NAV actions when running webarena and miniwob benchmarks +USE_CONCISE_ANSWER = ( + os.environ.get('USE_CONCISE_ANSWER', 'false') == 'true' +) # only return concise answer when running webarena and miniwob benchmarks -def parse_response(response: str) -> Action: - if '```' not in response: - # unexpected response format, message back to user - return MessageAction(response) - thought = response.split('```')[0].strip() - action_str = response.split('```')[1].strip() - # handle send message to user function call in BrowserGym - for sub_action in action_str.split('\n'): - if 'send_msg_to_user(' in sub_action: - tree = ast.parse(sub_action) - args = tree.body[0].value.args # type: 
ignore - return MessageAction(args[0].value) +if not USE_NAV and USE_CONCISE_ANSWER: + EVAL_MODE = True # disabled NAV actions and only return concise answer, for webarena and miniwob benchmarks\ +else: + EVAL_MODE = False - return BrowseInteractiveAction(browser_actions=action_str, thought=thought) + +def get_error_prefix(last_browser_action: str) -> str: + return f'IMPORTANT! Last action is incorrect:\n{last_browser_action}\nThink again with the current observation of the page.\n' + + +def get_system_message(goal: str, action_space: str) -> str: + return f"""\ +# Instructions +Review the current state of the page and all other information to find the best +possible next action to accomplish your goal. Your answer will be interpreted +and executed by a program, make sure to follow the formatting instructions. + +# Goal: +{goal} + +# Action Space +{action_space} +""" + + +CONCISE_INSTRUCTION = """\ + +Here is another example with chain of thought of a valid action when providing a concise answer to user: +" +In order to accomplish my goal I need to send the information asked back to the user. This page list the information of HP Inkjet Fax Machine, which is the product identified in the objective. Its price is $279.49. I will send a message back to user with the answer. 
+```send_msg_to_user("$279.49")``` +" +""" + + +def get_prompt(error_prefix: str, cur_axtree_txt: str, prev_action_str: str) -> str: + prompt = f"""\ +{error_prefix} + +# Current Accessibility Tree: +{cur_axtree_txt} + +# Previous Actions +{prev_action_str} + +Here is an example with chain of thought of a valid action when clicking on a button: +" +In order to accomplish my goal I need to click on the button with bid 12 +```click("12")``` +" +""".strip() + if USE_CONCISE_ANSWER: + prompt += CONCISE_INSTRUCTION + return prompt class BrowsingAgent(Agent): @@ -42,25 +92,26 @@ class BrowsingAgent(Agent): """ sandbox_plugins: list[PluginRequirement] = [] + runtime_tools: list[RuntimeTool] = [RuntimeTool.BROWSER] + response_parser = BrowsingResponseParser() def __init__( self, llm: LLM, ) -> None: - """ - Initializes a new instance of the BrowsingAgent class. + """Initializes a new instance of the BrowsingAgent class. Parameters: - llm (LLM): The llm to be used by this agent """ super().__init__(llm) + # define a configurable action space, with chat functionality, web navigation, and webpage grounding using accessibility tree and HTML. + # see https://github.com/ServiceNow/BrowserGym/blob/main/core/src/browsergym/core/action/highlevel.py for more details + action_subsets = ['chat', 'bid'] + if USE_NAV: + action_subsets.append('nav') self.action_space = HighLevelActionSet( - # see https://github.com/ServiceNow/BrowserGym/blob/main/core/src/browsergym/core/action/highlevel.py for more details - subsets=[ - 'chat', - 'bid', - 'nav', - ], # define a configurable action space, with chat functionality, web navigation, and webpage grounding using accessibility tree and HTML. + subsets=action_subsets, strict=False, # less strict on the parsing of the actions multiaction=True, # enable to agent to take multiple actions at once ) @@ -68,15 +119,13 @@ def __init__( self.reset() def reset(self) -> None: - """ - Resets the Browsing Agent. 
- """ + """Resets the Browsing Agent.""" super().reset() self.cost_accumulator = 0 + self.error_accumulator = 0 def step(self, state: State) -> Action: - """ - Performs one step using the Browsing Agent. + """Performs one step using the Browsing Agent. This includes gathering information on previous steps and prompting the model to make a browsing command to execute. Parameters: @@ -87,81 +136,76 @@ def step(self, state: State) -> Action: - MessageAction(content) - Message action to run (e.g. ask for clarification) - AgentFinishAction() - end the interaction """ - goal = state.get_current_user_intent() messages = [] - prev_actions = '' + prev_actions = [] cur_axtree_txt = '' error_prefix = '' last_obs = None - for prev_action, obs in state.history: - if isinstance(prev_action, BrowseInteractiveAction): - prev_actions += f'{prev_action.browser_actions}\n' - last_obs = obs - elif ( - isinstance(prev_action, MessageAction) and prev_action.source != 'user' - ): - # agent has responded, task finish. - return AgentFinishAction() + last_action = None + + if EVAL_MODE and len(state.history.get_events_as_list()) == 1: + # for webarena and miniwob++ eval, we need to retrieve the initial observation already in browser env + # initialize and retrieve the first observation by issuing an noop OP + # For non-benchmark browsing, the browser env starts with a blank page, and the agent is expected to first navigate to desired websites + return BrowseInteractiveAction(browser_actions='noop()') + + for event in state.history.get_events(): + if isinstance(event, BrowseInteractiveAction): + prev_actions.append(event.browser_actions) + last_action = event + elif isinstance(event, MessageAction) and event.source == EventSource.AGENT: + # agent has responded, task finished. 
+ return AgentFinishAction(outputs={'content': event.content}) + elif isinstance(event, Observation): + last_obs = event + + if EVAL_MODE: + prev_actions = prev_actions[1:] # remove the first noop action + + prev_action_str = '\n'.join(prev_actions) + # if the final BrowserInteractiveAction exec BrowserGym's send_msg_to_user, + # we should also send a message back to the user in OpenDevin and call it a day + if ( + isinstance(last_action, BrowseInteractiveAction) + and last_action.browsergym_send_msg_to_user + ): + return MessageAction(last_action.browsergym_send_msg_to_user) if isinstance(last_obs, BrowserOutputObservation): if last_obs.error: # add error recovery prompt prefix - error_prefix = f'IMPORTANT! Last action is incorrect:\n{last_obs.last_browser_action}\nThink again with the current observation of the page.\n' - cur_axtree_txt = flatten_axtree_to_str(last_obs.axtree_object) - - system_msg = f"""\ -# Instructions -Review the current state of the page and all other information to find the best -possible next action to accomplish your goal. Your answer will be interpreted -and executed by a program, make sure to follow the formatting instructions. - -# Goal: -{goal} - -# Action Space -{self.action_space.describe(with_long_description=False, with_examples=True)} -""" + error_prefix = get_error_prefix(last_obs.last_browser_action) + self.error_accumulator += 1 + if self.error_accumulator > 5: + return MessageAction('Too many errors encountered. 
Task failed.') + try: + cur_axtree_txt = flatten_axtree_to_str( + last_obs.axtree_object, + extra_properties=last_obs.extra_element_properties, + with_clickable=True, + filter_visible_only=True, + ) + except Exception as e: + logger.error( + 'Error when trying to process the accessibility tree: %s', e + ) + return MessageAction('Error encountered when browsing.') + + if (goal := state.get_current_user_intent()) is None: + goal = state.inputs['task'] + system_msg = get_system_message( + goal, + self.action_space.describe(with_long_description=False, with_examples=True), + ) messages.append({'role': 'system', 'content': system_msg}) - prompt = f"""\ -{error_prefix} - -# Current Accessibility Tree: -{cur_axtree_txt} - -# Previous Actions -{prev_actions} - -Here is an example with chain of thought of a valid action when clicking on a button: -" -In order to accomplish my goal I need to click on the button with bid 12 -```click("12")``` -" -""".strip() + prompt = get_prompt(error_prefix, cur_axtree_txt, prev_action_str) messages.append({'role': 'user', 'content': prompt}) + logger.debug(prompt) response = self.llm.completion( messages=messages, temperature=0.0, + stop=[')```', ')\n```'], ) - self.log_cost(response) - action_resp = response['choices'][0]['message']['content'] - logger.info(prompt) - logger.info(action_resp) - return parse_response(action_resp) - - def search_memory(self, query: str) -> list[str]: - raise NotImplementedError('Implement this abstract method') - - def log_cost(self, response): - # TODO: refactor to unified cost tracking - try: - cur_cost = self.llm.completion_cost(response) - except Exception: - cur_cost = 0 - self.cost_accumulator += cur_cost - logger.info( - 'Cost: %.2f USD | Accumulated Cost: %.2f USD', - cur_cost, - self.cost_accumulator, - ) + return self.response_parser.parse(response) diff --git a/agenthub/browsing_agent/prompt.py b/agenthub/browsing_agent/prompt.py index 9a56706679fe..3dfd7f5022bb 100644 --- 
a/agenthub/browsing_agent/prompt.py +++ b/agenthub/browsing_agent/prompt.py @@ -75,7 +75,8 @@ class PromptElement: Prompt elements are used to build the prompt. Use flags to control which prompt elements are visible. We use class attributes as a convenient way to implement static prompts, but feel free to override them with instance - attributes or @property decorator.""" + attributes or @property decorator. + """ _prompt = '' _abstract_ex = '' @@ -146,7 +147,7 @@ def shrink(self) -> None: """Implement shrinking of this prompt element. You need to recursively call all shrinkable elements that are part of - this prompt. You can also implement a shriking startegy for this prompt. + this prompt. You can also implement a shrinking strategy for this prompt. Shrinking is can be called multiple times to progressively shrink the prompt until it fits max_tokens. Default max shrink iterations is 20. """ @@ -161,7 +162,7 @@ class Truncater(Shrinkable): def __init__(self, visible, shrink_speed=0.3, start_truncate_iteration=10): super().__init__(visible=visible) - self.shrink_speed = shrink_speed # the percentage shrinked in each iteration + self.shrink_speed = shrink_speed # the percentage shrunk in each iteration self.start_truncate_iteration = ( start_truncate_iteration # the iteration to start truncating ) @@ -200,11 +201,10 @@ def fit_tokens( model_name : str, optional The name of the model used when tokenizing. - Returns + Returns: ------- str : the prompt after shrinking. """ - if max_prompt_chars is None: return shrinkable.prompt @@ -494,11 +494,13 @@ def _get_action_space(flags: Flags) -> AbstractActionSet: action_space = PythonActionSet(strict=flags.is_strict) if flags.multi_actions: warn( - f'Flag action_space={repr(flags.action_space)} incompatible with multi_actions={repr(flags.multi_actions)}.' 
+ f'Flag action_space={repr(flags.action_space)} incompatible with multi_actions={repr(flags.multi_actions)}.', + stacklevel=2, ) if flags.demo_mode != 'off': warn( - f'Flag action_space={repr(flags.action_space)} incompatible with demo_mode={repr(flags.demo_mode)}.' + f'Flag action_space={repr(flags.action_space)} incompatible with demo_mode={repr(flags.demo_mode)}.', + stacklevel=2, ) return action_space case 'bid': @@ -577,8 +579,8 @@ def _parse_answer(self, text_answer): def diff(previous, new): """Return a string showing the difference between original and new. - If the difference is above diff_threshold, return the diff string.""" - + If the difference is above diff_threshold, return the diff string. + """ if previous == new: return 'Identical', [] diff --git a/agenthub/browsing_agent/response_parser.py b/agenthub/browsing_agent/response_parser.py new file mode 100644 index 000000000000..9b64748880ee --- /dev/null +++ b/agenthub/browsing_agent/response_parser.py @@ -0,0 +1,88 @@ +import ast + +from opendevin.controller.action_parser import ActionParser, ResponseParser +from opendevin.core.logger import opendevin_logger as logger +from opendevin.events.action import ( + Action, + BrowseInteractiveAction, +) + + +class BrowsingResponseParser(ResponseParser): + def __init__(self): + # Need to pay attention to the item order in self.action_parsers + super().__init__() + self.action_parsers = [BrowsingActionParserMessage()] + self.default_parser = BrowsingActionParserBrowseInteractive() + + def parse(self, response: str) -> Action: + action_str = self.parse_response(response) + return self.parse_action(action_str) + + def parse_response(self, response) -> str: + action_str = response['choices'][0]['message']['content'] + if action_str is None: + return '' + action_str = action_str.strip() + if not action_str.endswith('```'): + action_str = action_str + ')```' + logger.info(action_str) + return action_str + + def parse_action(self, action_str: str) -> Action: + for 
action_parser in self.action_parsers: + if action_parser.check_condition(action_str): + return action_parser.parse(action_str) + return self.default_parser.parse(action_str) + + +class BrowsingActionParserMessage(ActionParser): + """Parser action: + - BrowseInteractiveAction(browser_actions) - unexpected response format, message back to user + """ + + def __init__( + self, + ): + pass + + def check_condition(self, action_str: str) -> bool: + return '```' not in action_str + + def parse(self, action_str: str) -> Action: + msg = f'send_msg_to_user("""{action_str}""")' + return BrowseInteractiveAction( + browser_actions=msg, + thought=action_str, + browsergym_send_msg_to_user=action_str, + ) + + +class BrowsingActionParserBrowseInteractive(ActionParser): + """Parser action: + - BrowseInteractiveAction(browser_actions) - handle send message to user function call in BrowserGym + """ + + def __init__( + self, + ): + pass + + def check_condition(self, action_str: str) -> bool: + return True + + def parse(self, action_str: str) -> Action: + thought = action_str.split('```')[0].strip() + action_str = action_str.split('```')[1].strip() + msg_content = '' + for sub_action in action_str.split('\n'): + if 'send_msg_to_user(' in sub_action: + tree = ast.parse(sub_action) + args = tree.body[0].value.args # type: ignore + msg_content = args[0].value + + return BrowseInteractiveAction( + browser_actions=action_str, + thought=thought, + browsergym_send_msg_to_user=msg_content, + ) diff --git a/agenthub/browsing_agent/utils.py b/agenthub/browsing_agent/utils.py index 646618739780..8e67679966ae 100644 --- a/agenthub/browsing_agent/utils.py +++ b/agenthub/browsing_agent/utils.py @@ -7,7 +7,6 @@ def yaml_parser(message): """Parse a yaml message for the retry function.""" - # saves gpt-3.5 from some yaml parsing errors message = re.sub(r':\s*\n(?=\S|\n)', ': ', message) @@ -16,7 +15,7 @@ def yaml_parser(message): valid = True retry_message = '' except yaml.YAMLError as e: - warn(str(e)) 
+ warn(str(e), stacklevel=2) value = {} valid = False retry_message = "Your response is not a valid yaml. Please try again and be careful to the format. Don't add any apology or comment, just the answer." @@ -47,7 +46,6 @@ def _compress_chunks(text, identifier, skip_list, split_regex='\n\n+'): def compress_string(text): """Compress a string by replacing redundant paragraphs and lines with identifiers.""" - # Perform paragraph-level compression def_dict, compressed_text = _compress_chunks( text, identifier='§', skip_list=[], split_regex='\n\n+' @@ -79,12 +77,12 @@ def extract_html_tags(text, keys): keys : list of str The HTML tags to extract the content from. - Returns + Returns: ------- dict A dictionary mapping each key to a list of subset in `text` that match the key. - Notes + Notes: ----- All text and keys will be converted to lowercase before matching. @@ -126,7 +124,7 @@ def parse_html_tags(text, keys=(), optional_keys=(), merge_multiple=False): optional_keys : list of str The HTML tags to extract the content from, but are optional. - Returns + Returns: ------- dict A dictionary mapping each key to subset of `text` that match the key. diff --git a/agenthub/codeact_agent/action_parser.py b/agenthub/codeact_agent/action_parser.py new file mode 100644 index 000000000000..12689549996d --- /dev/null +++ b/agenthub/codeact_agent/action_parser.py @@ -0,0 +1,177 @@ +import re + +from opendevin.controller.action_parser import ActionParser, ResponseParser +from opendevin.events.action import ( + Action, + AgentDelegateAction, + AgentFinishAction, + CmdRunAction, + IPythonRunCellAction, + MessageAction, +) + + +class CodeActResponseParser(ResponseParser): + """Parser action: + - CmdRunAction(command) - bash command to run + - IPythonRunCellAction(code) - IPython code to run + - AgentDelegateAction(agent, inputs) - delegate action for (sub)task + - MessageAction(content) - Message action to run (e.g. 
ask for clarification) + - AgentFinishAction() - end the interaction + """ + + def __init__(self): + # Need pay attention to the item order in self.action_parsers + super().__init__() + self.action_parsers = [ + CodeActActionParserFinish(), + CodeActActionParserCmdRun(), + CodeActActionParserIPythonRunCell(), + CodeActActionParserAgentDelegate(), + ] + self.default_parser = CodeActActionParserMessage() + + def parse(self, response) -> Action: + action_str = self.parse_response(response) + return self.parse_action(action_str) + + def parse_response(self, response) -> str: + action = response.choices[0].message.content + if action is None: + return '' + for lang in ['bash', 'ipython', 'browse']: + if f'' in action and f'' not in action: + action += f'' + return action + + def parse_action(self, action_str: str) -> Action: + for action_parser in self.action_parsers: + if action_parser.check_condition(action_str): + return action_parser.parse(action_str) + return self.default_parser.parse(action_str) + + +class CodeActActionParserFinish(ActionParser): + """Parser action: + - AgentFinishAction() - end the interaction + """ + + def __init__( + self, + ): + self.finish_command = None + + def check_condition(self, action_str: str) -> bool: + self.finish_command = re.search(r'.*', action_str, re.DOTALL) + return self.finish_command is not None + + def parse(self, action_str: str) -> Action: + assert ( + self.finish_command is not None + ), 'self.finish_command should not be None when parse is called' + thought = action_str.replace(self.finish_command.group(0), '').strip() + return AgentFinishAction(thought=thought) + + +class CodeActActionParserCmdRun(ActionParser): + """Parser action: + - CmdRunAction(command) - bash command to run + - AgentFinishAction() - end the interaction + """ + + def __init__( + self, + ): + self.bash_command = None + + def check_condition(self, action_str: str) -> bool: + self.bash_command = re.search( + r'(.*?)', action_str, re.DOTALL + ) + return 
self.bash_command is not None + + def parse(self, action_str: str) -> Action: + assert ( + self.bash_command is not None + ), 'self.bash_command should not be None when parse is called' + thought = action_str.replace(self.bash_command.group(0), '').strip() + # a command was found + command_group = self.bash_command.group(1).strip() + if command_group.strip() == 'exit': + return AgentFinishAction() + return CmdRunAction(command=command_group, thought=thought) + + +class CodeActActionParserIPythonRunCell(ActionParser): + """Parser action: + - IPythonRunCellAction(code) - IPython code to run + """ + + def __init__( + self, + ): + self.python_code = None + self.jupyter_kernel_init_code: str = 'from agentskills import *' + + def check_condition(self, action_str: str) -> bool: + self.python_code = re.search( + r'(.*?)', action_str, re.DOTALL + ) + return self.python_code is not None + + def parse(self, action_str: str) -> Action: + assert ( + self.python_code is not None + ), 'self.python_code should not be None when parse is called' + code_group = self.python_code.group(1).strip() + thought = action_str.replace(self.python_code.group(0), '').strip() + return IPythonRunCellAction( + code=code_group, + thought=thought, + kernel_init_code=self.jupyter_kernel_init_code, + ) + + +class CodeActActionParserAgentDelegate(ActionParser): + """Parser action: + - AgentDelegateAction(agent, inputs) - delegate action for (sub)task + """ + + def __init__( + self, + ): + self.agent_delegate = None + + def check_condition(self, action_str: str) -> bool: + self.agent_delegate = re.search( + r'(.*)', action_str, re.DOTALL + ) + return self.agent_delegate is not None + + def parse(self, action_str: str) -> Action: + assert ( + self.agent_delegate is not None + ), 'self.agent_delegate should not be None when parse is called' + thought = action_str.replace(self.agent_delegate.group(0), '').strip() + browse_actions = self.agent_delegate.group(1).strip() + task = f'{thought}. 
I should start with: {browse_actions}' + return AgentDelegateAction(agent='BrowsingAgent', inputs={'task': task}) + + +class CodeActActionParserMessage(ActionParser): + """Parser action: + - MessageAction(content) - Message action to run (e.g. ask for clarification) + """ + + def __init__( + self, + ): + pass + + def check_condition(self, action_str: str) -> bool: + # We assume the LLM is GOOD enough that when it returns pure natural language + # it wants to talk to the user + return True + + def parse(self, action_str: str) -> Action: + return MessageAction(content=action_str, wait_for_response=True) diff --git a/agenthub/codeact_agent/codeact_agent.py b/agenthub/codeact_agent/codeact_agent.py index 6d11de010cdf..42c95f17e850 100644 --- a/agenthub/codeact_agent/codeact_agent.py +++ b/agenthub/codeact_agent/codeact_agent.py @@ -1,5 +1,4 @@ -import re - +from agenthub.codeact_agent.action_parser import CodeActResponseParser from agenthub.codeact_agent.prompt import ( COMMAND_DOCS, EXAMPLES, @@ -9,44 +8,39 @@ ) from opendevin.controller.agent import Agent from opendevin.controller.state.state import State +from opendevin.core.config import config from opendevin.events.action import ( Action, + AgentDelegateAction, AgentFinishAction, - BrowseInteractiveAction, CmdRunAction, IPythonRunCellAction, MessageAction, ) from opendevin.events.observation import ( - BrowserOutputObservation, + AgentDelegateObservation, CmdOutputObservation, IPythonRunCellObservation, ) +from opendevin.events.serialization.event import truncate_content from opendevin.llm.llm import LLM from opendevin.runtime.plugins import ( AgentSkillsRequirement, JupyterRequirement, PluginRequirement, ) +from opendevin.runtime.tools import RuntimeTool ENABLE_GITHUB = True -def parse_response(response) -> str: - action = response.choices[0].message.content - for lang in ['bash', 'ipython', 'browse']: - if f'' in action and f'' not in action: - action += f'' - return action - - def action_to_str(action: Action) 
-> str: if isinstance(action, CmdRunAction): return f'{action.thought}\n\n{action.command}\n' elif isinstance(action, IPythonRunCellAction): return f'{action.thought}\n\n{action.code}\n' - elif isinstance(action, BrowseInteractiveAction): - return f'{action.thought}\n\n{action.browser_actions}\n' + elif isinstance(action, AgentDelegateAction): + return f'{action.thought}\n\n{action.inputs["task"]}\n' elif isinstance(action, MessageAction): return action.content return '' @@ -54,7 +48,7 @@ def action_to_str(action: Action) -> str: def get_action_message(action: Action) -> dict[str, str] | None: if ( - isinstance(action, BrowseInteractiveAction) + isinstance(action, AgentDelegateAction) or isinstance(action, CmdRunAction) or isinstance(action, IPythonRunCellAction) or isinstance(action, MessageAction) @@ -67,10 +61,13 @@ def get_action_message(action: Action) -> dict[str, str] | None: def get_observation_message(obs) -> dict[str, str] | None: + max_message_chars = config.get_llm_config_from_agent( + 'CodeActAgent' + ).max_message_chars if isinstance(obs, CmdOutputObservation): - content = 'OBSERVATION:\n' + truncate_observation(obs.content) + content = 'OBSERVATION:\n' + truncate_content(obs.content, max_message_chars) content += ( - f'\n[Command {obs.command_id} finished with exit code {obs.exit_code}]]' + f'\n[Command {obs.command_id} finished with exit code {obs.exit_code}]' ) return {'role': 'user', 'content': content} elif isinstance(obs, IPythonRunCellObservation): @@ -83,28 +80,16 @@ def get_observation_message(obs) -> dict[str, str] | None: '![image](data:image/png;base64, ...) 
already displayed to user' ) content = '\n'.join(splitted) - content = truncate_observation(content) + content = truncate_content(content, max_message_chars) return {'role': 'user', 'content': content} - elif isinstance(obs, BrowserOutputObservation): - content = 'OBSERVATION:\n' + truncate_observation(obs.content) + elif isinstance(obs, AgentDelegateObservation): + content = 'OBSERVATION:\n' + truncate_content( + str(obs.outputs), max_message_chars + ) return {'role': 'user', 'content': content} return None -def truncate_observation(observation: str, max_chars: int = 10_000) -> str: - """ - Truncate the middle of the observation if it is too long. - """ - if len(observation) <= max_chars: - return observation - half = max_chars // 2 - return ( - observation[:half] - + '\n[... Observation truncated due to length ...]\n' - + observation[-half:] - ) - - # FIXME: We can tweak these two settings to create MicroAgents specialized toward different area def get_system_message() -> str: if ENABLE_GITHUB: @@ -118,7 +103,7 @@ def get_in_context_example() -> str: class CodeActAgent(Agent): - VERSION = '1.5' + VERSION = '1.8' """ The Code Act Agent is a minimalist agent. The agent works by passing the model a list of action-observation pairs and prompting the model to take the next step. @@ -157,22 +142,23 @@ class CodeActAgent(Agent): sandbox_plugins: list[PluginRequirement] = [ # NOTE: AgentSkillsRequirement need to go before JupyterRequirement, since - # AgentSkillsRequirement provides a lot of Python functions - # and it need to be initialized before Jupyter for Jupyter to use those functions. + # AgentSkillsRequirement provides a lot of Python functions, + # and it needs to be initialized before Jupyter for Jupyter to use those functions. 
AgentSkillsRequirement(), JupyterRequirement(), ] - jupyter_kernel_init_code: str = 'from agentskills import *' + runtime_tools: list[RuntimeTool] = [RuntimeTool.BROWSER] system_message: str = get_system_message() in_context_example: str = f"Here is an example of how you can interact with the environment for task solving:\n{get_in_context_example()}\n\nNOW, LET'S START!" + action_parser = CodeActResponseParser() + def __init__( self, llm: LLM, ) -> None: - """ - Initializes a new instance of the CodeActAgent class. + """Initializes a new instance of the CodeActAgent class. Parameters: - llm (LLM): The llm to be used by this agent @@ -181,49 +167,32 @@ def __init__( self.reset() def reset(self) -> None: - """ - Resets the CodeAct Agent. - """ + """Resets the CodeAct Agent.""" super().reset() def step(self, state: State) -> Action: - """ - Performs one step using the CodeAct Agent. + """Performs one step using the CodeAct Agent. This includes gathering info on previous steps and prompting the model to make a command to execute. Parameters: - - state (State): used to get updated info and background commands + - state (State): used to get updated info Returns: - CmdRunAction(command) - bash command to run - IPythonRunCellAction(code) - IPython code to run - - BrowseInteractiveAction(browsergym_command) - BrowserGym commands to run + - AgentDelegateAction(agent, inputs) - delegate action for (sub)task - MessageAction(content) - Message action to run (e.g. 
ask for clarification) - AgentFinishAction() - end the interaction """ - messages: list[dict[str, str]] = [ - {'role': 'system', 'content': self.system_message}, - {'role': 'user', 'content': self.in_context_example}, - ] + # if we're done, go back + latest_user_message = state.history.get_last_user_message() + if latest_user_message and latest_user_message.strip() == '/exit': + return AgentFinishAction() - for prev_action, obs in state.history: - action_message = get_action_message(prev_action) - if action_message: - messages.append(action_message) + # prepare what we want to send to the LLM + messages: list[dict[str, str]] = self._get_messages(state) - obs_message = get_observation_message(obs) - if obs_message: - messages.append(obs_message) - - latest_user_message = [m for m in messages if m['role'] == 'user'][-1] - if latest_user_message: - if latest_user_message['content'].strip() == '/exit': - return AgentFinishAction() - latest_user_message['content'] += ( - f'\n\nENVIRONMENT REMINDER: You have {state.max_iterations - state.iteration} turns left to complete the task.' 
- ) - - response = self.llm.do_completion( + response = self.llm.completion( messages=messages, stop=[ '', @@ -232,50 +201,36 @@ def step(self, state: State) -> Action: ], temperature=0.0, ) + return self.action_parser.parse(response) - action_str: str = parse_response(response) - state.num_of_chars += sum( - len(message['content']) for message in messages - ) + len(action_str) - - if finish_command := re.search(r'.*', action_str, re.DOTALL): - thought = action_str.replace(finish_command.group(0), '').strip() - return AgentFinishAction(thought=thought) - if bash_command := re.search( - r'(.*?)', action_str, re.DOTALL - ): - # remove the command from the action string to get thought - thought = action_str.replace(bash_command.group(0), '').strip() - # a command was found - command_group = bash_command.group(1).strip() - - if command_group.strip() == 'exit': - return AgentFinishAction() - return CmdRunAction(command=command_group, thought=thought) - elif python_code := re.search( - r'(.*?)', action_str, re.DOTALL - ): - # a code block was found - code_group = python_code.group(1).strip() - thought = action_str.replace(python_code.group(0), '').strip() - return IPythonRunCellAction( - code=code_group, - thought=thought, - kernel_init_code=self.jupyter_kernel_init_code, + def _get_messages(self, state: State) -> list[dict[str, str]]: + messages = [ + {'role': 'system', 'content': self.system_message}, + {'role': 'user', 'content': self.in_context_example}, + ] + + for event in state.history.get_events(): + # create a regular message from an event + message = ( + get_action_message(event) + if isinstance(event, Action) + else get_observation_message(event) ) - elif browse_command := re.search( - r'(.*)', action_str, re.DOTALL - ): - # BrowserGym actions was found - browse_actions = browse_command.group(1).strip() - thought = action_str.replace(browse_command.group(0), '').strip() - return BrowseInteractiveAction( - browser_actions=browse_actions, thought=thought + + # 
add regular message + if message: + messages.append(message) + + # the latest user message is important: + # we want to remind the agent of the environment constraints + latest_user_message = next( + (m for m in reversed(messages) if m['role'] == 'user'), None + ) + + # add a reminder to the prompt + if latest_user_message: + latest_user_message['content'] += ( + f'\n\nENVIRONMENT REMINDER: You have {state.max_iterations - state.iteration} turns left to complete the task. When finished reply with ' ) - else: - # We assume the LLM is GOOD enough that when it returns pure natural language - # it want to talk to the user - return MessageAction(content=action_str, wait_for_response=True) - def search_memory(self, query: str) -> list[str]: - raise NotImplementedError('Implement this abstract method') + return messages diff --git a/agenthub/codeact_agent/prompt.py b/agenthub/codeact_agent/prompt.py index 9188cc4080a2..0ba804ccb2eb 100644 --- a/agenthub/codeact_agent/prompt.py +++ b/agenthub/codeact_agent/prompt.py @@ -5,35 +5,43 @@ COMMAND_DOCS = ( '\nApart from the standard Python library, the assistant can also use the following functions (already imported) in environment:\n' f'{_AGENT_SKILLS_DOCS}' - "Please note that THE `edit_file` FUNCTION REQUIRES PROPER INDENTATION. If the assistant would like to add the line ' print(x)', it must fully write that out, with all those spaces before the code! Indentation is important and code that is not indented correctly will fail and require fixing before it can be run." + "Please note that THE `edit_file_by_replace`, `append_file` and `insert_content_at_line` FUNCTIONS REQUIRE PROPER INDENTATION. If the assistant would like to add the line ' print(x)', it must fully write that out, with all those spaces before the code! Indentation is important and code that is not indented correctly will fail and require fixing before it can be run." 
) # ======= SYSTEM MESSAGE ======= MINIMAL_SYSTEM_PREFIX = """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. -The assistant can interact with an interactive Python (Jupyter Notebook) environment and receive the corresponding output when needed. The code should be enclosed using "" tag, for example: +The assistant can use an interactive Python (Jupyter Notebook) environment, executing code with . print("Hello World!") The assistant can execute bash commands on behalf of the user by wrapping them with and . + For example, you can list the files in the current directory by ls . +Important, however: do not run interactive commands. You do not have access to stdin. +Also, you need to handle commands that may run indefinitely and not return a result. For such cases, you should redirect the output to a file and run the command in the background to avoid blocking the execution. +For example, to run a Python script that might run indefinitely without returning immediately, you can use the following format: python3 app.py > server.log 2>&1 & +Also, if a command execution result saying like: Command: "npm start" timed out. Sending SIGINT to the process, you should also retry with running the command in the background. """ -BROWSING_PREFIX = """The assistant can browse the Internet with commands on behalf of the user by wrapping them with and . -For example, you can browse a given URL by goto("") . -The assistant should attempt fewer things at a time instead of putting too much commands OR code in one "execute" block. +BROWSING_PREFIX = """The assistant can browse the Internet with and . +For example, Tell me the usa's president using google search . +Or Tell me what is in http://example.com . 
""" PIP_INSTALL_PREFIX = """The assistant can install Python packages using the %pip magic command in an IPython environment by using the following syntax: %pip install [package needed] and should always import packages and define variables before starting to use them.""" SYSTEM_PREFIX = MINIMAL_SYSTEM_PREFIX + BROWSING_PREFIX + PIP_INSTALL_PREFIX -GITHUB_MESSAGE = """To do any activities on GitHub, the assistant should use the token in the $GITHUB_TOKEN environment variable. -For instance, to push a local branch `my_branch` to the github repo `owner/repo`, the assistant can use the following four commands: +GITHUB_MESSAGE = """To interact with GitHub, use the $GITHUB_TOKEN environment variable. +For example, to push a branch `my_branch` to the GitHub repo `owner/repo`: git push https://$GITHUB_TOKEN@github.com/owner/repo.git my_branch -If the assistant require access to GitHub but $GITHUB_TOKEN is not set, ask the user to set it.""" - -SYSTEM_SUFFIX = """The assistant's response should be concise. -The assistant should include ONLY ONE or or in every one of the responses, unless the assistant is finished with the task or need more input or action from the user in order to proceed. -IMPORTANT: Whenever possible, execute the code for the user using or or instead of providing it. +If $GITHUB_TOKEN is not set, ask the user to set it.""" + +SYSTEM_SUFFIX = """Responses should be concise. +The assistant should attempt fewer things at a time instead of putting too many commands OR too much code in one "execute" block. +Include ONLY ONE , , or per response, unless the assistant is finished with the task or needs more input or action from the user in order to proceed. +If the assistant is finished with the task you MUST include in your response. +IMPORTANT: Execute code using , , or whenever possible. +When handling files, try to use full paths and pwd to avoid errors. 
""" @@ -41,10 +49,10 @@ EXAMPLES = """ --- START OF EXAMPLE --- -USER: Can you create a list of numbers from 1 to 10, and create a web page to display them at port 5000? +USER: Create a list of numbers from 1 to 10, and display them in a web page at port 5000. ASSISTANT: -Sure! Let me create a file first: +Sure! Let me create a Python file `app.py`: create_file('app.py') @@ -52,7 +60,9 @@ USER: OBSERVATION: [File: /workspace/app.py (1 lines total)] +(this is the beginning of the file) 1| +(this is the end of the file) [File app.py created.] ASSISTANT: @@ -68,11 +78,17 @@ def index(): if __name__ == '__main__': app.run(port=5000)\"\"\" -edit_file(start=1, end=1, content=EDITED_CODE) + +insert_content_at_line( + 'app.py', + 1, + EDITED_CODE, +) USER: OBSERVATION: +(this is the beginning of the file) 1|from flask import Flask 2|app = Flask(__name__) 3| @@ -83,7 +99,8 @@ def index(): 8| 9|if __name__ == '__main__': 10| app.run(port=5000) -[File updated. Please review the changes and make sure they are correct (correct indentation, no duplicate lines, etc). Edit the file again if necessary.] +(this is the end of the file) +[File updated (edited at line 1). Please review the changes and make sure they are correct (correct indentation, no duplicate lines, etc). Edit the file again if necessary.] ASSISTANT: I have created a Python file `app.py` that will display a list of numbers from 1 to 10 when you run it. Let me run the Python file for you: @@ -163,7 +180,7 @@ def index(): ASSISTANT: Sure! 
Let me browse the server's homepage at http://127.0.0.1:5000: -goto("http://127.0.0.1:5000") +Get the content on "http://127.0.0.1:5000" USER: @@ -193,6 +210,7 @@ def index(): USER: [File: /workspace/app.py (10 lines total)] +(this is the beginning of the file) 1|from flask import Flask 2|app = Flask(__name__) 3| @@ -203,16 +221,22 @@ def index(): 8| 9|if __name__ == '__main__': 10| app.run(port=5000) +(this is the end of the file) ASSISTANT: I should edit the file to display the numbers in a table format. I should include correct indentation. Let me update the file: -edit_file(start=7, end=7, content=" return '' + ''.join([f'' for i in numbers]) + '
{i}
'") +edit_file_by_replace( + 'app.py', + to_replace=" return str(numbers)", + new_content=" return '' + ''.join([f'' for i in numbers]) + '
{i}
'", +)
USER: Observation: [File: /workspace/app.py (10 lines total after edit)] +(this is the beginning of the file) 1|from flask import Flask 2|app = Flask(__name__) 3| @@ -223,10 +247,11 @@ def index(): 8| 9|if __name__ == '__main__': 10| app.run(port=5000) -[File updated. Please review the changes and make sure they are correct (correct indentation, no duplicate lines, etc). Edit the file again if necessary.] +(this is the end of the file) +[File updated (edited at line 7). Please review the changes and make sure they are correct (correct indentation, no duplicate lines, etc). Edit the file again if necessary.] ASSISTANT: -The file has been updated. Let me run the Python file again with the new changes: +Running the updated file: python3 app.py > server.log 2>&1 & @@ -236,14 +261,15 @@ def index(): [1] 126 ASSISTANT: -The server is running on port 5000 with PID 126. You can access the list of numbers in a table format by visiting http://127.0.0.1:5000. Free free to let me know if you have any further requests! +The server is running on port 5000 with PID 126. You can access the list of numbers in a table format by visiting http://127.0.0.1:5000. Let me know if you have any further requests! + --- END OF EXAMPLE --- """ INVALID_INPUT_MESSAGE = ( "I don't understand your input. 
\n" - 'If you want to execute a bash command, please use YOUR_COMMAND_HERE .\n' - 'If you want to execute a block of Python code, please use YOUR_COMMAND_HERE .\n' - 'If you want to browse the Internet, please use YOUR_COMMAND_HERE .\n' + 'For bash commands, use YOUR_COMMAND .\n' + 'For Python code, use YOUR_CODE .\n' + 'For browsing, use YOUR_COMMAND .\n' ) diff --git a/agenthub/codeact_swe_agent/README.md b/agenthub/codeact_swe_agent/README.md index 114eed7ec309..b52031a037ff 100644 --- a/agenthub/codeact_swe_agent/README.md +++ b/agenthub/codeact_swe_agent/README.md @@ -2,6 +2,6 @@ This agent is an adaptation of the original [SWE Agent](https://swe-agent.com/) based on CodeAct using the `agentskills` library of OpenDevin. -Its intended use is **solving Github issues**. +Its intended use is **solving GitHub issues**. -It removes web-browsing and Github capability from the original CodeAct agent to avoid confusion to the agent. +It removes web-browsing and GitHub capability from the original CodeAct agent to avoid confusion to the agent. 
diff --git a/agenthub/codeact_swe_agent/action_parser.py b/agenthub/codeact_swe_agent/action_parser.py new file mode 100644 index 000000000000..d51a6c6ec128 --- /dev/null +++ b/agenthub/codeact_swe_agent/action_parser.py @@ -0,0 +1,110 @@ +import re + +from opendevin.controller.action_parser import ActionParser +from opendevin.events.action import ( + Action, + AgentFinishAction, + CmdRunAction, + IPythonRunCellAction, + MessageAction, +) + + +class CodeActSWEActionParserFinish(ActionParser): + """Parser action: + - AgentFinishAction() - end the interaction + """ + + def __init__( + self, + ): + self.finish_command = None + + def check_condition(self, action_str: str) -> bool: + self.finish_command = re.search(r'.*', action_str, re.DOTALL) + return self.finish_command is not None + + def parse(self, action_str: str) -> Action: + assert ( + self.finish_command is not None + ), 'self.finish_command should not be None when parse is called' + thought = action_str.replace(self.finish_command.group(0), '').strip() + return AgentFinishAction(thought=thought) + + +class CodeActSWEActionParserCmdRun(ActionParser): + """Parser action: + - CmdRunAction(command) - bash command to run + - AgentFinishAction() - end the interaction + """ + + def __init__( + self, + ): + self.bash_command = None + + def check_condition(self, action_str: str) -> bool: + self.bash_command = re.search( + r'(.*?)', action_str, re.DOTALL + ) + return self.bash_command is not None + + def parse(self, action_str: str) -> Action: + assert ( + self.bash_command is not None + ), 'self.bash_command should not be None when parse is called' + thought = action_str.replace(self.bash_command.group(0), '').strip() + # a command was found + command_group = self.bash_command.group(1).strip() + if command_group.strip() == 'exit': + return AgentFinishAction() + return CmdRunAction(command=command_group, thought=thought) + + +class CodeActSWEActionParserIPythonRunCell(ActionParser): + """Parser action: + - 
IPythonRunCellAction(code) - IPython code to run + """ + + def __init__( + self, + ): + self.python_code = None + self.jupyter_kernel_init_code: str = 'from agentskills import *' + + def check_condition(self, action_str: str) -> bool: + self.python_code = re.search( + r'(.*?)', action_str, re.DOTALL + ) + return self.python_code is not None + + def parse(self, action_str: str) -> Action: + assert ( + self.python_code is not None + ), 'self.python_code should not be None when parse is called' + code_group = self.python_code.group(1).strip() + thought = action_str.replace(self.python_code.group(0), '').strip() + return IPythonRunCellAction( + code=code_group, + thought=thought, + kernel_init_code=self.jupyter_kernel_init_code, + ) + + +class CodeActSWEActionParserMessage(ActionParser): + """Parser action: + - MessageAction(content) - Message action to run (e.g. ask for clarification) + """ + + def __init__( + self, + ): + pass + + def check_condition(self, action_str: str) -> bool: + # We assume the LLM is GOOD enough that when it returns pure natural language + # it wants to talk to the user + return True + + def parse(self, action_str: str) -> Action: + return MessageAction(content=action_str, wait_for_response=True) diff --git a/agenthub/codeact_swe_agent/codeact_swe_agent.py b/agenthub/codeact_swe_agent/codeact_swe_agent.py index edc63885b93c..ff12f7eab20f 100644 --- a/agenthub/codeact_swe_agent/codeact_swe_agent.py +++ b/agenthub/codeact_swe_agent/codeact_swe_agent.py @@ -1,40 +1,32 @@ -import re - from agenthub.codeact_swe_agent.prompt import ( COMMAND_DOCS, - MINIMAL_SYSTEM_PREFIX, SWE_EXAMPLE, + SYSTEM_PREFIX, SYSTEM_SUFFIX, ) +from agenthub.codeact_swe_agent.response_parser import CodeActSWEResponseParser from opendevin.controller.agent import Agent from opendevin.controller.state.state import State +from opendevin.core.config import config from opendevin.events.action import ( Action, AgentFinishAction, - BrowseInteractiveAction, CmdRunAction, 
IPythonRunCellAction, MessageAction, ) from opendevin.events.observation import ( - BrowserOutputObservation, CmdOutputObservation, IPythonRunCellObservation, ) +from opendevin.events.serialization.event import truncate_content from opendevin.llm.llm import LLM from opendevin.runtime.plugins import ( AgentSkillsRequirement, JupyterRequirement, PluginRequirement, ) - - -def parse_response(response) -> str: - action = response.choices[0].message.content - for lang in ['bash', 'ipython', 'browse']: - if f'' in action and f'' not in action: - action += f'' - return action +from opendevin.runtime.tools import RuntimeTool def action_to_str(action: Action) -> str: @@ -42,8 +34,6 @@ def action_to_str(action: Action) -> str: return f'{action.thought}\n\n{action.command}\n' elif isinstance(action, IPythonRunCellAction): return f'{action.thought}\n\n{action.code}\n' - elif isinstance(action, BrowseInteractiveAction): - return f'{action.thought}\n\n{action.browser_actions}\n' elif isinstance(action, MessageAction): return action.content return '' @@ -51,8 +41,7 @@ def action_to_str(action: Action) -> str: def get_action_message(action: Action) -> dict[str, str] | None: if ( - isinstance(action, BrowseInteractiveAction) - or isinstance(action, CmdRunAction) + isinstance(action, CmdRunAction) or isinstance(action, IPythonRunCellAction) or isinstance(action, MessageAction) ): @@ -64,10 +53,13 @@ def get_action_message(action: Action) -> dict[str, str] | None: def get_observation_message(obs) -> dict[str, str] | None: + max_message_chars = config.get_llm_config_from_agent( + 'CodeActSWEAgent' + ).max_message_chars if isinstance(obs, CmdOutputObservation): - content = 'OBSERVATION:\n' + truncate_observation(obs.content) + content = 'OBSERVATION:\n' + truncate_content(obs.content, max_message_chars) content += ( - f'\n[Command {obs.command_id} finished with exit code {obs.exit_code}]]' + f'\n[Command {obs.command_id} finished with exit code {obs.exit_code}]' ) return {'role': 
'user', 'content': content} elif isinstance(obs, IPythonRunCellObservation): @@ -80,30 +72,13 @@ def get_observation_message(obs) -> dict[str, str] | None: '![image](data:image/png;base64, ...) already displayed to user' ) content = '\n'.join(splitted) - content = truncate_observation(content) - return {'role': 'user', 'content': content} - elif isinstance(obs, BrowserOutputObservation): - content = 'OBSERVATION:\n' + truncate_observation(obs.content) + content = truncate_content(content, max_message_chars) return {'role': 'user', 'content': content} return None -def truncate_observation(observation: str, max_chars: int = 10_000) -> str: - """ - Truncate the middle of the observation if it is too long. - """ - if len(observation) <= max_chars: - return observation - half = max_chars // 2 - return ( - observation[:half] - + '\n[... Observation truncated due to length ...]\n' - + observation[-half:] - ) - - def get_system_message() -> str: - return f'{MINIMAL_SYSTEM_PREFIX}\n\n{COMMAND_DOCS}\n\n{SYSTEM_SUFFIX}' + return f'{SYSTEM_PREFIX}\n\n{COMMAND_DOCS}\n\n{SYSTEM_SUFFIX}' def get_in_context_example() -> str: @@ -111,7 +86,7 @@ def get_in_context_example() -> str: class CodeActSWEAgent(Agent): - VERSION = '1.5' + VERSION = '1.6' """ This agent is an adaptation of the original [SWE Agent](https://swe-agent.com/) based on CodeAct 1.5 using the `agentskills` library of OpenDevin. @@ -122,22 +97,23 @@ class CodeActSWEAgent(Agent): sandbox_plugins: list[PluginRequirement] = [ # NOTE: AgentSkillsRequirement need to go before JupyterRequirement, since - # AgentSkillsRequirement provides a lot of Python functions - # and it need to be initialized before Jupyter for Jupyter to use those functions. + # AgentSkillsRequirement provides a lot of Python functions, + # and it needs to be initialized before Jupyter for Jupyter to use those functions. 
AgentSkillsRequirement(), JupyterRequirement(), ] - jupyter_kernel_init_code: str = 'from agentskills import *' + runtime_tools: list[RuntimeTool] = [] system_message: str = get_system_message() in_context_example: str = f"Here is an example of how you can interact with the environment for task solving:\n{get_in_context_example()}\n\nNOW, LET'S START!" + response_parser = CodeActSWEResponseParser() + def __init__( self, llm: LLM, ) -> None: - """ - Initializes a new instance of the CodeActAgent class. + """Initializes a new instance of the CodeActAgent class. Parameters: - llm (LLM): The llm to be used by this agent @@ -146,14 +122,11 @@ def __init__( self.reset() def reset(self) -> None: - """ - Resets the CodeAct Agent. - """ + """Resets the CodeAct Agent.""" super().reset() def step(self, state: State) -> Action: - """ - Performs one step using the CodeAct Agent. + """Performs one step using the CodeAct Agent. This includes gathering info on previous steps and prompting the model to make a command to execute. Parameters: @@ -162,85 +135,56 @@ def step(self, state: State) -> Action: Returns: - CmdRunAction(command) - bash command to run - IPythonRunCellAction(code) - IPython code to run - - BrowseInteractiveAction(browsergym_command) - BrowserGym commands to run - MessageAction(content) - Message action to run (e.g. 
ask for clarification) - AgentFinishAction() - end the interaction """ - messages: list[dict[str, str]] = [ - {'role': 'system', 'content': self.system_message}, - {'role': 'user', 'content': self.in_context_example}, - ] - - for prev_action, obs in state.history: - action_message = get_action_message(prev_action) - if action_message: - messages.append(action_message) + # if we're done, go back + latest_user_message = state.history.get_last_user_message() + if latest_user_message and latest_user_message.strip() == '/exit': + return AgentFinishAction() - obs_message = get_observation_message(obs) - if obs_message: - messages.append(obs_message) + # prepare what we want to send to the LLM + messages: list[dict[str, str]] = self._get_messages(state) - latest_user_message = [m for m in messages if m['role'] == 'user'][-1] - if latest_user_message: - if latest_user_message['content'].strip() == '/exit': - return AgentFinishAction() - latest_user_message['content'] += ( - f'\n\nENVIRONMENT REMINDER: You have {state.max_iterations - state.iteration} turns left to complete the task.' - ) - - response = self.llm.do_completion( + response = self.llm.completion( messages=messages, stop=[ '
', '
', - '
', ], temperature=0.0, ) - action_str: str = parse_response(response) - state.num_of_chars += sum( - len(message['content']) for message in messages - ) + len(action_str) - - if finish_command := re.search(r'.*', action_str, re.DOTALL): - thought = action_str.replace(finish_command.group(0), '').strip() - return AgentFinishAction(thought=thought) - if bash_command := re.search( - r'(.*?)', action_str, re.DOTALL - ): - # remove the command from the action string to get thought - thought = action_str.replace(bash_command.group(0), '').strip() - # a command was found - command_group = bash_command.group(1).strip() - - if command_group.strip() == 'exit': - return AgentFinishAction() - return CmdRunAction(command=command_group, thought=thought) - elif python_code := re.search( - r'(.*?)', action_str, re.DOTALL - ): - # a code block was found - code_group = python_code.group(1).strip() - thought = action_str.replace(python_code.group(0), '').strip() - return IPythonRunCellAction( - code=code_group, - thought=thought, - kernel_init_code=self.jupyter_kernel_init_code, + return self.response_parser.parse(response) + + def _get_messages(self, state: State) -> list[dict[str, str]]: + messages = [ + {'role': 'system', 'content': self.system_message}, + {'role': 'user', 'content': self.in_context_example}, + ] + + for event in state.history.get_events(): + # create a regular message from an event + message = ( + get_action_message(event) + if isinstance(event, Action) + else get_observation_message(event) ) - elif browse_command := re.search( - r'(.*)', action_str, re.DOTALL - ): - # BrowserGym actions was found - browse_actions = browse_command.group(1).strip() - thought = action_str.replace(browse_command.group(0), '').strip() - return BrowseInteractiveAction( - browser_actions=browse_actions, thought=thought + + # add regular message + if message: + messages.append(message) + + # the latest user message is important: + # we want to remind the agent of the environment 
constraints + latest_user_message = next( + (m for m in reversed(messages) if m['role'] == 'user'), None + ) + + # add a reminder to the prompt + if latest_user_message: + latest_user_message['content'] += ( + f'\n\nENVIRONMENT REMINDER: You have {state.max_iterations - state.iteration} turns left to complete the task.' ) - else: - # We assume the LLM is GOOD enough that when it returns pure natural language - # it want to talk to the user - return MessageAction(content=action_str, wait_for_response=True) - def search_memory(self, query: str) -> list[str]: - raise NotImplementedError('Implement this abstract method') + return messages diff --git a/agenthub/codeact_swe_agent/prompt.py b/agenthub/codeact_swe_agent/prompt.py index cdcae56fcda3..b83c48900134 100644 --- a/agenthub/codeact_swe_agent/prompt.py +++ b/agenthub/codeact_swe_agent/prompt.py @@ -18,9 +18,13 @@ For example, you can list the files in the current directory by ls . """ +PIP_INSTALL_PREFIX = """The assistant can install Python packages using the %pip magic command in an IPython environment by using the following syntax: %pip install [package needed] and should always import packages and define variables before starting to use them.""" + +SYSTEM_PREFIX = MINIMAL_SYSTEM_PREFIX + PIP_INSTALL_PREFIX + SYSTEM_SUFFIX = """The assistant's response should be concise. -The assistant should include ONLY ONE or or in every one of the responses, unless the assistant is finished with the task or need more input or action from the user in order to proceed. -IMPORTANT: Whenever possible, execute the code for the user using or or instead of providing it. +The assistant should include ONLY ONE or in every one of the responses, unless the assistant is finished with the task or need more input or action from the user in order to proceed. +IMPORTANT: Whenever possible, execute the code for the user using or instead of providing it. 
""" SWE_EXAMPLE = """ diff --git a/agenthub/codeact_swe_agent/response_parser.py b/agenthub/codeact_swe_agent/response_parser.py new file mode 100644 index 000000000000..e8e8d34e9e51 --- /dev/null +++ b/agenthub/codeact_swe_agent/response_parser.py @@ -0,0 +1,46 @@ +from agenthub.codeact_swe_agent.action_parser import ( + CodeActSWEActionParserCmdRun, + CodeActSWEActionParserFinish, + CodeActSWEActionParserIPythonRunCell, + CodeActSWEActionParserMessage, +) +from opendevin.controller.action_parser import ResponseParser +from opendevin.events.action import Action + + +class CodeActSWEResponseParser(ResponseParser): + """Parser action: + - CmdRunAction(command) - bash command to run + - IPythonRunCellAction(code) - IPython code to run + - MessageAction(content) - Message action to run (e.g. ask for clarification) + - AgentFinishAction() - end the interaction + """ + + def __init__(self): + # Need pay attention to the item order in self.action_parsers + super().__init__() + self.action_parsers = [ + CodeActSWEActionParserFinish(), + CodeActSWEActionParserCmdRun(), + CodeActSWEActionParserIPythonRunCell(), + ] + self.default_parser = CodeActSWEActionParserMessage() + + def parse(self, response: str) -> Action: + action_str = self.parse_response(response) + return self.parse_action(action_str) + + def parse_response(self, response) -> str: + action = response.choices[0].message.content + if action is None: + return '' + for lang in ['bash', 'ipython']: + if f'' in action and f'' not in action: + action += f'' + return action + + def parse_action(self, action_str: str) -> Action: + for action_parser in self.action_parsers: + if action_parser.check_condition(action_str): + return action_parser.parse(action_str) + return self.default_parser.parse(action_str) diff --git a/agenthub/delegator_agent/agent.py b/agenthub/delegator_agent/agent.py index a6870c54c277..5a07cfc68c43 100644 --- a/agenthub/delegator_agent/agent.py +++ b/agenthub/delegator_agent/agent.py @@ -8,15 +8,13 
@@ class DelegatorAgent(Agent): VERSION = '1.0' """ - The planner agent utilizes a special prompting strategy to create long term plans for solving problems. - The agent is given its previous action-observation pairs, current task, and hint based on last action taken at every step. + The Delegator Agent is responsible for delegating tasks to other agents based on the current task. """ current_delegate: str = '' def __init__(self, llm: LLM): - """ - Initialize the Delegator Agent with an LLM + """Initialize the Delegator Agent with an LLM Parameters: - llm (LLM): The llm to be used by this agent @@ -24,16 +22,15 @@ def __init__(self, llm: LLM): super().__init__(llm) def step(self, state: State) -> Action: - """ - Checks to see if current step is completed, returns AgentFinishAction if True. - Otherwise, creates a plan prompt and sends to model for inference, returning the result as the next action. + """Checks to see if current step is completed, returns AgentFinishAction if True. + Otherwise, delegates the task to the next agent in the pipeline. 
Parameters: - state (State): The current state given the previous actions and observations Returns: - AgentFinishAction: If the last state was 'completed', 'verified', or 'abandoned' - - Action: The next action to take based on llm response + - AgentDelegateAction: The next agent to delegate the task to """ if self.current_delegate == '': self.current_delegate = 'study' @@ -42,7 +39,9 @@ def step(self, state: State) -> Action: agent='StudyRepoForTaskAgent', inputs={'task': task} ) - last_observation = state.history[-1][1] + # last observation in history should be from the delegate + last_observation = state.history.get_last_observation() + if not isinstance(last_observation, AgentDelegateObservation): raise Exception('Last observation is not an AgentDelegateObservation') @@ -81,6 +80,3 @@ def step(self, state: State) -> Action: ) else: raise Exception('Invalid delegate state') - - def search_memory(self, query: str) -> list[str]: - return [] diff --git a/agenthub/dummy_agent/agent.py b/agenthub/dummy_agent/agent.py index 1dc29aaded69..e6cf291588c1 100644 --- a/agenthub/dummy_agent/agent.py +++ b/agenthub/dummy_agent/agent.py @@ -7,7 +7,6 @@ Action, AddTaskAction, AgentFinishAction, - AgentRecallAction, AgentRejectAction, BrowseInteractiveAction, BrowseURLAction, @@ -18,7 +17,6 @@ ModifyTaskAction, ) from opendevin.events.observation import ( - AgentRecallObservation, CmdOutputObservation, FileReadObservation, FileWriteObservation, @@ -31,8 +29,6 @@ """ FIXME: There are a few problems this surfaced * FileWrites seem to add an unintended newline at the end of the file -* command_id is sometimes a number, sometimes a string -* Why isn't the output of the background command split between two steps? 
* Browser not working """ @@ -40,8 +36,6 @@ 'ActionObs', {'action': Action, 'observations': list[Observation]} ) -BACKGROUND_CMD = 'echo "This is in the background" && sleep .1 && echo "This too"' - class DummyAgent(Agent): VERSION = '1.0' @@ -95,28 +89,6 @@ def __init__(self, llm: LLM): ) ], }, - { - 'action': CmdRunAction(command=BACKGROUND_CMD, background=True), - 'observations': [ - CmdOutputObservation( - 'Background command started. To stop it, send a `kill` action with command_id 42', - command_id='42', # type: ignore[arg-type] - command=BACKGROUND_CMD, - ), - CmdOutputObservation( - 'This is in the background\nThis too\n', - command_id='42', # type: ignore[arg-type] - command=BACKGROUND_CMD, - ), - ], - }, - { - 'action': AgentRecallAction(query='who am I?'), - 'observations': [ - AgentRecallObservation('', memories=['I am a computer.']), - # CmdOutputObservation('This too\n', command_id='42', command=BACKGROUND_CMD), - ], - }, { 'action': BrowseURLAction(url='https://google.com'), 'observations': [ @@ -145,11 +117,16 @@ def step(self, state: State) -> Action: time.sleep(0.1) if state.iteration > 0: prev_step = self.steps[state.iteration - 1] + + # a step is (action, observations list) if 'observations' in prev_step: + # one obs, at most expected_observations = prev_step['observations'] - hist_start = len(state.history) - len(expected_observations) + + # check if the history matches the expected observations + hist_events = state.history.get_last_events(len(expected_observations)) for i in range(len(expected_observations)): - hist_obs = event_to_dict(state.history[hist_start + i][1]) + hist_obs = event_to_dict(hist_events[i]) expected_obs = event_to_dict(expected_observations[i]) if ( 'command_id' in hist_obs['extras'] @@ -163,13 +140,7 @@ def step(self, state: State) -> Action: ): del expected_obs['extras']['command_id'] expected_obs['content'] = '' - if hist_obs != expected_obs: - print('\nactual', hist_obs) - print('\nexpect', expected_obs) assert ( 
hist_obs == expected_obs ), f'Expected observation {expected_obs}, got {hist_obs}' return self.steps[state.iteration]['action'] - - def search_memory(self, query: str) -> list[str]: - return ['I am a computer.'] diff --git a/agenthub/micro/README.md b/agenthub/micro/README.md index 68be0a8a9a42..959a8aad25a0 100644 --- a/agenthub/micro/README.md +++ b/agenthub/micro/README.md @@ -12,3 +12,6 @@ in the following structure: Note that `prompt.md` could use jinja2 template syntax. During runtime, `prompt.md` is loaded and rendered, and used together with `agent.yaml` to initialize a micro-agent. + +Micro-agents can be used independently. You can also use `ManagerAgent` which knows +how to coordinate the agents and collaboratively finish a task. diff --git a/agenthub/micro/_instructions/actions/kill.md b/agenthub/micro/_instructions/actions/kill.md index e3399e924d5b..e69de29bb2d1 100644 --- a/agenthub/micro/_instructions/actions/kill.md +++ b/agenthub/micro/_instructions/actions/kill.md @@ -1,2 +0,0 @@ -* `kill` - kills a background command - * `command_id` - the ID of the background command to kill diff --git a/agenthub/micro/_instructions/actions/reject.md b/agenthub/micro/_instructions/actions/reject.md index e4cb730635d8..8b89a9a20c1d 100644 --- a/agenthub/micro/_instructions/actions/reject.md +++ b/agenthub/micro/_instructions/actions/reject.md @@ -1,2 +1,2 @@ * `reject` - reject the task. Arguments: - * `outputs` - a dictionary representing the outputs of your task, if any + * `outputs` - a dictionary with only a `reason` attribute diff --git a/agenthub/micro/_instructions/actions/run.md b/agenthub/micro/_instructions/actions/run.md index 37695a312373..fc08f6145472 100644 --- a/agenthub/micro/_instructions/actions/run.md +++ b/agenthub/micro/_instructions/actions/run.md @@ -1,3 +1,2 @@ * `run` - runs a command on the command line in a Linux shell. 
Arguments: * `command` - the command to run - * `background` - if true, run the command in the background, so that other commands can be run concurrently. Useful for e.g. starting a server. You won't be able to see the logs. You don't need to end the command with `&`, just set this to true. diff --git a/agenthub/micro/agent.py b/agenthub/micro/agent.py index c44dc882aedf..faff8cdf272c 100644 --- a/agenthub/micro/agent.py +++ b/agenthub/micro/agent.py @@ -2,11 +2,13 @@ from opendevin.controller.agent import Agent from opendevin.controller.state.state import State +from opendevin.core.config import config from opendevin.core.utils import json from opendevin.events.action import Action from opendevin.events.serialization.action import action_from_dict from opendevin.events.serialization.event import event_to_memory from opendevin.llm.llm import LLM +from opendevin.memory.history import ShortTermHistory from .instructions import instructions from .registry import all_microagents @@ -21,24 +23,29 @@ def parse_response(orig_response: str) -> Action: def to_json(obj, **kwargs): - """ - Serialize an object to str format - """ + """Serialize an object to str format""" return json.dumps(obj, **kwargs) -def history_to_json(obj, **kwargs): - """ - Serialize and simplify history to str format - """ - if isinstance(obj, list): - # process history, make it simpler. 
- processed_history = [] - for action, observation in obj: - processed_history.append( - (event_to_memory(action), event_to_memory(observation)) - ) - return json.dumps(processed_history, **kwargs) +def history_to_json(history: ShortTermHistory, max_events=20, **kwargs): + """Serialize and simplify history to str format""" + # TODO: get agent specific llm config + llm_config = config.get_llm_config() + max_message_chars = llm_config.max_message_chars + + processed_history = [] + event_count = 0 + + for event in history.get_events(reverse=True): + if event_count >= max_events: + break + processed_history.append(event_to_memory(event, max_message_chars)) + event_count += 1 + + # history is in reverse order, let's fix it + processed_history.reverse() + + return json.dumps(processed_history, **kwargs) class MicroAgent(Agent): @@ -64,11 +71,7 @@ def step(self, state: State) -> Action: latest_user_message=state.get_current_user_intent(), ) messages = [{'content': prompt, 'role': 'user'}] - resp = self.llm.do_completion(messages=messages) + resp = self.llm.completion(messages=messages) action_resp = resp['choices'][0]['message']['content'] - state.num_of_chars += len(prompt) + len(action_resp) action = parse_response(action_resp) return action - - def search_memory(self, query: str) -> list[str]: - return [] diff --git a/agenthub/micro/coder/prompt.md b/agenthub/micro/coder/prompt.md index 5b8d3195f358..31d4439e2b36 100644 --- a/agenthub/micro/coder/prompt.md +++ b/agenthub/micro/coder/prompt.md @@ -21,7 +21,7 @@ Do NOT finish until you have completed the tasks. 
## History {{ instructions.history_truncated }} -{{ history_to_json(state.history[-10:]) }} +{{ history_to_json(state.history, max_events=20) }} ## Format {{ instructions.format.action }} diff --git a/agenthub/micro/commit_writer/README.md b/agenthub/micro/commit_writer/README.md index 6c33eabe4c85..927bc67da286 100644 --- a/agenthub/micro/commit_writer/README.md +++ b/agenthub/micro/commit_writer/README.md @@ -3,7 +3,7 @@ CommitWriterAgent can help write git commit message. Example: ```bash -WORKSPACE_MOUNT_PATH="`PWD`" SANDBOX_TYPE="exec" \ +WORKSPACE_MOUNT_PATH="`PWD`" SANDBOX_BOX_TYPE="ssh" \ poetry run python opendevin/core/main.py -t "dummy task" -c CommitWriterAgent -d ./ ``` diff --git a/agenthub/micro/commit_writer/agent.yaml b/agenthub/micro/commit_writer/agent.yaml index 107924964d28..1228838c9efd 100644 --- a/agenthub/micro/commit_writer/agent.yaml +++ b/agenthub/micro/commit_writer/agent.yaml @@ -3,3 +3,4 @@ description: "Write a git commit message for files in the git staging area" inputs: {} outputs: answer: string + reason: string diff --git a/agenthub/micro/commit_writer/prompt.md b/agenthub/micro/commit_writer/prompt.md index 38322ba07778..4b857ca93a5c 100644 --- a/agenthub/micro/commit_writer/prompt.md +++ b/agenthub/micro/commit_writer/prompt.md @@ -11,14 +11,16 @@ changes. The commit message should include: - A summary line that clearly states the purpose of the changes. - Optionally, a detailed description if the changes are complex or need further explanation. -You should find the diff using `git diff --cached`, compile a commit message, -and call the `finish` action with `outputs.answer` set to the answer. If current -repo is not a valid git repo, or there is no diff in the staging area, please call -the `reject` action with `outputs.answer` set to the reason. +You should first use `git status` to check whether it's a valid git repo and there +is diff in the staging area. If not, please call the `reject` action. 
+ +If it is a valid git repo and there is diff in the staging area, you should find +the diff using `git diff --cached`, compile a commit message, and call the `finish` +action with `outputs.answer` set to the answer. ## History {{ instructions.history_truncated }} -{{ history_to_json(state.history[-10:]) }} +{{ history_to_json(state.history, max_events=20) }} If the last item in the history is an error, you should try to fix it. diff --git a/agenthub/micro/manager/agent.yaml b/agenthub/micro/manager/agent.yaml index 5980f6f03fe9..d4e956618e1b 100644 --- a/agenthub/micro/manager/agent.yaml +++ b/agenthub/micro/manager/agent.yaml @@ -3,4 +3,6 @@ description: Delegates tasks to microagents based on their area of expertise generates: Action inputs: task: string -outputs: {} +outputs: + summary: string # if finished + reason: string # if rejected diff --git a/agenthub/micro/manager/prompt.md b/agenthub/micro/manager/prompt.md index 27080ccf0498..c21f275fa348 100644 --- a/agenthub/micro/manager/prompt.md +++ b/agenthub/micro/manager/prompt.md @@ -7,6 +7,16 @@ can do the actual work. A description of each agent is provided below. You MUST select one of the delegates below to move towards accomplishing the task, and you MUST provide the correct inputs for the delegate you select. +Note: the delegated agent either returns "finish" or "reject". +- If the action is "finish", but the full task is not done yet, you should +continue to delegate to one of the agents below to until the full task is finished. +- If the action is "reject", it means the delegated agent is not capable of the +task you send to. You should revisit the input you send to the delegate, and consider +whether any other delegate would be able to solve the task. If you cannot find +a proper delegate agent, or the delegate attempts keep failing, call the `reject` +action. In `reason` attribute, make sure you include your attempts (e.g. what agent +you have delegated to, and why they failed). 
+ ## Agents {% for name, details in delegates.items() %} ### {{ name }} @@ -17,11 +27,15 @@ provide the correct inputs for the delegate you select. ## History {{ instructions.history_truncated }} -{{ history_to_json(state.history[-10:]) }} +{{ history_to_json(state.history, max_events=20) }} + +If the last item in the history is an error, you should try to fix it. If you +cannot fix it, call the `reject` action. ## Available Actions {{ instructions.actions.delegate }} {{ instructions.actions.finish }} +{{ instructions.actions.reject }} ## Format {{ instructions.format.action }} diff --git a/agenthub/micro/math_agent/prompt.md b/agenthub/micro/math_agent/prompt.md index c28640716072..08cf99a1cd22 100644 --- a/agenthub/micro/math_agent/prompt.md +++ b/agenthub/micro/math_agent/prompt.md @@ -10,7 +10,7 @@ and call the `finish` action with `outputs.answer` set to the answer. ## History {{ instructions.history_truncated }} -{{ history_to_json(state.history[-10:]) }} +{{ history_to_json(state.history, max_events=20) }} If the last item in the history is an error, you should try to fix it. diff --git a/agenthub/micro/postgres_agent/prompt.md b/agenthub/micro/postgres_agent/prompt.md index 5ddbfcb78a0d..aca2a38f6b26 100644 --- a/agenthub/micro/postgres_agent/prompt.md +++ b/agenthub/micro/postgres_agent/prompt.md @@ -18,7 +18,7 @@ You may take any of the following actions: ## History {{ instructions.history_truncated }} -{{ history_to_json(state.history[-10:]) }} +{{ history_to_json(state.history, max_events=20) }} ## Format {{ instructions.format.action }} diff --git a/agenthub/micro/repo_explorer/prompt.md b/agenthub/micro/repo_explorer/prompt.md index fde381e40ccd..b317a4e77b0e 100644 --- a/agenthub/micro/repo_explorer/prompt.md +++ b/agenthub/micro/repo_explorer/prompt.md @@ -20,7 +20,7 @@ When you're done, put your summary into the output of the `finish` action. 
## History {{ instructions.history_truncated }} -{{ history_to_json(state.history[-10:]) }} +{{ history_to_json(state.history, max_events=20) }} ## Format {{ instructions.format.action }} diff --git a/agenthub/micro/study_repo_for_task/prompt.md b/agenthub/micro/study_repo_for_task/prompt.md index 3199181d1d55..91cdf3c3c6a0 100644 --- a/agenthub/micro/study_repo_for_task/prompt.md +++ b/agenthub/micro/study_repo_for_task/prompt.md @@ -20,11 +20,11 @@ Do NOT finish until you have a complete understanding of which parts of the codebase are relevant to the project, including particular files, functions, and classes. When you're done, put your summary in `outputs.summary` in the `finish` action. Remember, your task is to explore and study the current repository, not actually -implement the solution. If the codebase is empty, you shoud call the `finish` action. +implement the solution. If the codebase is empty, you should call the `finish` action. ## History {{ instructions.history_truncated }} -{{ history_to_json(state.history[-10:]) }} +{{ history_to_json(state.history, max_events=20) }} ## Format {{ instructions.format.action }} @@ -41,8 +41,7 @@ ASSISTANT: { "action": "run", "args": { - "command": "ls", - "background": false + "command": "ls" } } diff --git a/agenthub/micro/typo_fixer_agent/prompt.md b/agenthub/micro/typo_fixer_agent/prompt.md index a4d04769b32a..4440d9b56fe1 100644 --- a/agenthub/micro/typo_fixer_agent/prompt.md +++ b/agenthub/micro/typo_fixer_agent/prompt.md @@ -31,7 +31,7 @@ Do NOT finish until you have fixed all the typos and generated a summary. 
## History {{ instructions.history_truncated }} -{{ history_to_json(state.history[-5:]) }} +{{ history_to_json(state.history, max_events=10) }} ## Format {{ instructions.format.action }} diff --git a/agenthub/micro/verifier/prompt.md b/agenthub/micro/verifier/prompt.md index 7e5ef79a2dc9..48c7a73cc45d 100644 --- a/agenthub/micro/verifier/prompt.md +++ b/agenthub/micro/verifier/prompt.md @@ -22,7 +22,7 @@ explaining what the problem is. ## History {{ instructions.history_truncated }} -{{ history_to_json(state.history[-10:]) }} +{{ history_to_json(state.history, max_events=20) }} ## Format {{ instructions.format.action }} diff --git a/agenthub/monologue_agent/agent.py b/agenthub/monologue_agent/agent.py index 35b82180fb04..92482f80ba03 100644 --- a/agenthub/monologue_agent/agent.py +++ b/agenthub/monologue_agent/agent.py @@ -1,4 +1,5 @@ import agenthub.monologue_agent.utils.prompts as prompts +from agenthub.monologue_agent.response_parser import MonologueResponseParser from agenthub.monologue_agent.utils.prompts import INITIAL_THOUGHTS from opendevin.controller.agent import Agent from opendevin.controller.state.state import State @@ -7,7 +8,6 @@ from opendevin.core.schema import ActionType from opendevin.events.action import ( Action, - AgentRecallAction, BrowseURLAction, CmdRunAction, FileReadAction, @@ -16,7 +16,6 @@ NullAction, ) from opendevin.events.observation import ( - AgentRecallObservation, BrowserOutputObservation, CmdOutputObservation, FileReadObservation, @@ -26,13 +25,11 @@ from opendevin.events.serialization.event import event_to_memory from opendevin.llm.llm import LLM from opendevin.memory.condenser import MemoryCondenser +from opendevin.runtime.tools import RuntimeTool -if config.agent.memory_enabled: +if config.get_agent_config('MonologueAgent').memory_enabled: from opendevin.memory.memory import LongTermMemory -MAX_TOKEN_COUNT_PADDING = 512 -MAX_OUTPUT_LENGTH = 5000 - class MonologueAgent(Agent): VERSION = '1.0' @@ -46,10 +43,11 @@ class 
MonologueAgent(Agent): initial_thoughts: list[dict[str, str]] memory: 'LongTermMemory | None' memory_condenser: MemoryCondenser + runtime_tools: list[RuntimeTool] = [RuntimeTool.BROWSER] + response_parser = MonologueResponseParser() def __init__(self, llm: LLM): - """ - Initializes the Monologue Agent with an llm. + """Initializes the Monologue Agent with an llm. Parameters: - llm (LLM): The llm to be used by this agent @@ -57,19 +55,17 @@ def __init__(self, llm: LLM): super().__init__(llm) def _initialize(self, task: str): - """ - Utilizes the INITIAL_THOUGHTS list to give the agent a context for its capabilities + """Utilizes the INITIAL_THOUGHTS list to give the agent a context for its capabilities and how to navigate the WORKSPACE_MOUNT_PATH_IN_SANDBOX in `config` (e.g., /workspace by default). Short circuited to return when already initialized. Will execute again when called after reset. Parameters: - - task (str): The initial goal statement provided by the user + - task: The initial goal statement provided by the user Raises: - AgentNoInstructionError: If task is not provided """ - if self._initialized: return @@ -77,7 +73,7 @@ def _initialize(self, task: str): raise AgentNoInstructionError() self.initial_thoughts = [] - if config.agent.memory_enabled: + if config.get_agent_config('MonologueAgent').memory_enabled: self.memory = LongTermMemory() else: self.memory = None @@ -88,6 +84,9 @@ def _initialize(self, task: str): self._initialized = True def _add_initial_thoughts(self, task): + max_message_chars = config.get_llm_config_from_agent( + 'MonologueAgent' + ).max_message_chars previous_action = '' for thought in INITIAL_THOUGHTS: thought = thought.replace('$TASK', task) @@ -99,13 +98,13 @@ def _add_initial_thoughts(self, task): ) elif previous_action == ActionType.READ: observation = FileReadObservation(content=thought, path='') - elif previous_action == ActionType.RECALL: - observation = AgentRecallObservation(content=thought, memories=[]) elif 
previous_action == ActionType.BROWSE: observation = BrowserOutputObservation( content=thought, url='', screenshot='' ) - self.initial_thoughts.append(event_to_memory(observation)) + self.initial_thoughts.append( + event_to_memory(observation, max_message_chars) + ) previous_action = '' else: action: Action = NullAction() @@ -122,21 +121,16 @@ def _add_initial_thoughts(self, task): path = thought.split('READ ')[1] action = FileReadAction(path=path) previous_action = ActionType.READ - elif thought.startswith('RECALL'): - query = thought.split('RECALL ')[1] - action = AgentRecallAction(query=query) - previous_action = ActionType.RECALL elif thought.startswith('BROWSE'): url = thought.split('BROWSE ')[1] action = BrowseURLAction(url=url) previous_action = ActionType.BROWSE else: action = MessageAction(thought) - self.initial_thoughts.append(event_to_memory(action)) + self.initial_thoughts.append(event_to_memory(action, max_message_chars)) def step(self, state: State) -> Action: - """ - Modifies the current state by adding the most recent actions and observations, then prompts the model to think about it's next action to take using monologue, memory, and hint. + """Modifies the current state by adding the most recent actions and observations, then prompts the model to think about it's next action to take using monologue, memory, and hint. 
Parameters: - state (State): The current state based on previous steps taken @@ -144,32 +138,35 @@ def step(self, state: State) -> Action: Returns: - Action: The next action to take based on LLM response """ - + max_message_chars = config.get_llm_config_from_agent( + 'MonologueAgent' + ).max_message_chars goal = state.get_current_user_intent() self._initialize(goal) recent_events: list[dict[str, str]] = [] # add the events from state.history - for prev_action, obs in state.history: - if not isinstance(prev_action, NullAction): - recent_events.append(event_to_memory(prev_action)) - if not isinstance(obs, NullObservation): - recent_events.append(self._truncate_output(event_to_memory(obs))) + for event in state.history.get_events(): + recent_events.append(event_to_memory(event, max_message_chars)) # add the last messages to long term memory - if self.memory is not None and state.history and len(state.history) > 0: - self.memory.add_event(event_to_memory(state.history[-1][0])) - self.memory.add_event( - self._truncate_output(event_to_memory(state.history[-1][1])) - ) + if self.memory is not None: + last_action = state.history.get_last_action() + last_observation = state.history.get_last_observation() + + # this should still work + # we will need to do this differently: find out if there really is an action or an observation in this step + if last_action: + self.memory.add_event(event_to_memory(last_action, max_message_chars)) + if last_observation: + self.memory.add_event( + event_to_memory(last_observation, max_message_chars) + ) # the action prompt with initial thoughts and recent events prompt = prompts.get_request_action_prompt( - goal, - self.initial_thoughts, - recent_events, - state.background_commands_obs, + goal, self.initial_thoughts, recent_events ) messages: list[dict[str, str]] = [ @@ -177,60 +174,12 @@ def step(self, state: State) -> Action: ] # format all as a single message, a monologue - resp = self.llm.do_completion(messages=messages) + resp = 
self.llm.completion(messages=messages) - # get the next action from the response - action_resp = resp['choices'][0]['message']['content'] - - # keep track of max_chars fallback option - state.num_of_chars += len(prompt) + len(action_resp) - - action = prompts.parse_action_response(action_resp) + action = self.response_parser.parse(resp) self.latest_action = action return action - def _truncate_output( - self, observation: dict, max_chars: int = MAX_OUTPUT_LENGTH - ) -> dict[str, str]: - """ - Truncates the output of an observation to a maximum number of characters. - - Parameters: - - output (str): The observation whose output to truncate - - max_chars (int): The maximum number of characters to allow - - Returns: - - str: The truncated output - """ - if ( - 'args' in observation - and 'output' in observation['args'] - and len(observation['args']['output']) > max_chars - ): - output = observation['args']['output'] - half = max_chars // 2 - observation['args']['output'] = ( - output[:half] - + '\n[... Output truncated due to length...]\n' - + output[-half:] - ) - return observation - - def search_memory(self, query: str) -> list[str]: - """ - Uses VectorIndexRetriever to find related memories within the long term memory. - Uses search to produce top 10 results. 
- - Parameters: - - query (str): The query that we want to find related memories for - - Returns: - - list[str]: A list of top 10 text results that matched the query - """ - if self.memory is None: - return [] - return self.memory.search(query) - def reset(self) -> None: super().reset() diff --git a/agenthub/monologue_agent/response_parser.py b/agenthub/monologue_agent/response_parser.py new file mode 100644 index 000000000000..f7dfce068999 --- /dev/null +++ b/agenthub/monologue_agent/response_parser.py @@ -0,0 +1,37 @@ +from opendevin.controller.action_parser import ResponseParser +from opendevin.core.utils import json +from opendevin.events.action import ( + Action, +) +from opendevin.events.serialization.action import action_from_dict + + +class MonologueResponseParser(ResponseParser): + def __init__(self): + super().__init__() + + def parse(self, response: str) -> Action: + action_str = self.parse_response(response) + return self.parse_action(action_str) + + def parse_response(self, response) -> str: + # get the next action from the response + return response['choices'][0]['message']['content'] + + def parse_action(self, action_str: str) -> Action: + """Parses a string to find an action within it + + Parameters: + - response (str): The string to be parsed + + Returns: + - Action: The action that was found in the response string + """ + # attempt to load the JSON dict from the response + action_dict = json.loads(action_str) + + if 'content' in action_dict: + # The LLM gets confused here. 
Might as well be robust + action_dict['contents'] = action_dict.pop('content') + + return action_from_dict(action_dict) diff --git a/agenthub/monologue_agent/utils/prompts.py b/agenthub/monologue_agent/utils/prompts.py index 0384fa884f0b..fe1a41e81178 100644 --- a/agenthub/monologue_agent/utils/prompts.py +++ b/agenthub/monologue_agent/utils/prompts.py @@ -3,9 +3,6 @@ from opendevin.events.action import ( Action, ) -from opendevin.events.observation import ( - CmdOutputObservation, -) from opendevin.events.serialization.action import action_from_dict ACTION_PROMPT = """ @@ -32,25 +29,18 @@ * `content` - the content to write to the file * `run` - runs a command. Arguments: * `command` - the command to run - * `background` - if true, run the command in the background, so that other commands can be run concurrently. Useful for e.g. starting a server. You won't be able to see the logs. You don't need to end the command with `&`, just set this to true. -* `kill` - kills a background command - * `command_id` - the ID of the background command to kill * `browse` - opens a web page. Arguments: * `url` - the URL to open * `push` - Push a branch from the current repo to github: * `owner` - the owner of the repo to push to * `repo` - the name of the repo to push to * `branch` - the name of the branch to push -* `recall` - recalls a past memory. Arguments: - * `query` - the query to search for * `message` - make a plan, set a goal, record your thoughts, or ask for more input from the user. Arguments: * `content` - the message to record * `wait_for_response` - set to `true` to wait for the user to respond before proceeding * `finish` - if you're absolutely certain that you've completed your task and have tested your work, use the finish action to stop working. -%(background_commands)s - -You MUST take time to think in between read, write, run, kill, browse, push, and recall actions--do this with the `message` action. 
+You MUST take time to think in between read, write, run, browse, and push actions--do this with the `message` action. You should never act twice in a row without thinking. But if your last several actions are all `message` actions, you should consider taking a different action. @@ -100,15 +90,7 @@ 'It seems like I have some kind of short term memory.', 'Each of my thoughts seems to be stored in a JSON array.', 'It seems whatever I say next will be added as an object to the list.', - 'But no one has perfect short-term memory. My list of thoughts will be summarized and condensed over time, losing information in the process.', - 'Fortunately I have long term memory!', - 'I can just perform a recall action, followed by the thing I want to remember. And then related thoughts just spill out!', - "Sometimes they're random thoughts that don't really have to do with what I wanted to remember. But usually they're exactly what I need!", - "Let's try it out!", - 'RECALL what it is I want to do', - "Here's what I want to do: $TASK", - 'How am I going to get there though?', - "Neat! And it looks like it's easy for me to use the command line too! I just have to perform a run action and include the command I want to run in the command argument. The command output just jumps into my head!", + "It looks like it's easy for me to use the command line too! I just have to perform a run action and include the command I want to run in the command argument. The command output just jumps into my head!", 'RUN echo "hello world"', 'hello world', 'Cool! 
I bet I can write files too using the write action.', @@ -138,8 +120,7 @@ def get_summarize_monologue_prompt(thoughts: list[dict]): - """ - Gets the prompt for summarizing the monologue + """Gets the prompt for summarizing the monologue Returns: - str: A formatted string with the current monologue within the prompt @@ -153,23 +134,16 @@ def get_request_action_prompt( task: str, thoughts: list[dict], recent_events: list[dict], - background_commands_obs: list[CmdOutputObservation] | None = None, ): - """ - Gets the action prompt formatted with appropriate values. + """Gets the action prompt formatted with appropriate values. Parameters: - task (str): The current task the agent is trying to accomplish - thoughts (list[dict]): The agent's current thoughts - - background_commands_obs (list[CmdOutputObservation]): list of all observed background commands running Returns: - str: Formatted prompt string with hint, task, monologue, and background commands included """ - - if background_commands_obs is None: - background_commands_obs = [] - hint = '' if len(recent_events) > 0: latest_event = recent_events[-1] @@ -187,15 +161,6 @@ def get_request_action_prompt( else: hint = "You're just getting started! What should you do first?" - bg_commands_message = '' - if len(background_commands_obs) > 0: - bg_commands_message = 'The following commands are running in the background:' - for command_obs in background_commands_obs: - bg_commands_message += ( - f'\n`{command_obs.command_id}`: {command_obs.command}' - ) - bg_commands_message += '\nYou can end any process by sending a `kill` action with the numerical `command_id` above.' 
- user = 'opendevin' if config.run_as_devin else 'root' monologue = thoughts + recent_events @@ -203,17 +168,15 @@ def get_request_action_prompt( return ACTION_PROMPT % { 'task': task, 'monologue': json.dumps(monologue, indent=2), - 'background_commands': bg_commands_message, 'hint': hint, 'user': user, - 'timeout': config.sandbox_timeout, + 'timeout': config.sandbox.timeout, 'WORKSPACE_MOUNT_PATH_IN_SANDBOX': config.workspace_mount_path_in_sandbox, } def parse_action_response(orig_response: str) -> Action: - """ - Parses a string to find an action within it + """Parses a string to find an action within it Parameters: - response (str): The string to be parsed @@ -232,8 +195,7 @@ def parse_action_response(orig_response: str) -> Action: def parse_summary_response(response: str) -> list[dict]: - """ - Parses a summary of the monologue + """Parses a summary of the monologue Parameters: - response (str): The response string to be parsed diff --git a/agenthub/planner_agent/agent.py b/agenthub/planner_agent/agent.py index 2dd4ec8be037..8d9cb30972d4 100644 --- a/agenthub/planner_agent/agent.py +++ b/agenthub/planner_agent/agent.py @@ -1,9 +1,11 @@ +from agenthub.monologue_agent.response_parser import MonologueResponseParser from opendevin.controller.agent import Agent from opendevin.controller.state.state import State from opendevin.events.action import Action, AgentFinishAction from opendevin.llm.llm import LLM +from opendevin.runtime.tools import RuntimeTool -from .prompt import get_prompt, parse_response +from .prompt import get_prompt class PlannerAgent(Agent): @@ -12,10 +14,11 @@ class PlannerAgent(Agent): The planner agent utilizes a special prompting strategy to create long term plans for solving problems. The agent is given its previous action-observation pairs, current task, and hint based on last action taken at every step. 
""" + runtime_tools: list[RuntimeTool] = [RuntimeTool.BROWSER] + response_parser = MonologueResponseParser() def __init__(self, llm: LLM): - """ - Initialize the Planner Agent with an LLM + """Initialize the Planner Agent with an LLM Parameters: - llm (LLM): The llm to be used by this agent @@ -23,8 +26,7 @@ def __init__(self, llm: LLM): super().__init__(llm) def step(self, state: State) -> Action: - """ - Checks to see if current step is completed, returns AgentFinishAction if True. + """Checks to see if current step is completed, returns AgentFinishAction if True. Otherwise, creates a plan prompt and sends to model for inference, returning the result as the next action. Parameters: @@ -34,7 +36,6 @@ def step(self, state: State) -> Action: - AgentFinishAction: If the last state was 'completed', 'verified', or 'abandoned' - Action: The next action to take based on llm response """ - if state.root_task.state in [ 'completed', 'verified', @@ -43,11 +44,5 @@ def step(self, state: State) -> Action: return AgentFinishAction() prompt = get_prompt(state) messages = [{'content': prompt, 'role': 'user'}] - resp = self.llm.do_completion(messages=messages) - action_resp = resp['choices'][0]['message']['content'] - state.num_of_chars += len(prompt) + len(action_resp) - action = parse_response(action_resp) - return action - - def search_memory(self, query: str) -> list[str]: - return [] + resp = self.llm.completion(messages=messages) + return self.response_parser.parse(resp) diff --git a/agenthub/planner_agent/prompt.py b/agenthub/planner_agent/prompt.py index 955fca3ecb6e..aebebe1abfcf 100644 --- a/agenthub/planner_agent/prompt.py +++ b/agenthub/planner_agent/prompt.py @@ -1,4 +1,5 @@ from opendevin.controller.state.state import State +from opendevin.core.config import config from opendevin.core.logger import opendevin_logger as logger from opendevin.core.schema import ActionType from opendevin.core.utils import json @@ -6,13 +7,10 @@ Action, NullAction, ) -from 
opendevin.events.observation import ( - NullObservation, -) from opendevin.events.serialization.action import action_from_dict from opendevin.events.serialization.event import event_to_memory -HISTORY_SIZE = 10 +HISTORY_SIZE = 20 prompt = """ # Task @@ -77,9 +75,6 @@ * `content` - the content to write to the file * `run` - runs a command on the command line in a Linux shell. Arguments: * `command` - the command to run - * `background` - if true, run the command in the background, so that other commands can be run concurrently. Useful for e.g. starting a server. You won't be able to see the logs. You don't need to end the command with `&`, just set this to true. -* `kill` - kills a background command - * `command_id` - the ID of the background command to kill * `browse` - opens a web page. Arguments: * `url` - the URL to open * `message` - make a plan, set a goal, record your thoughts, or ask for more input from the user. Arguments: @@ -94,7 +89,7 @@ * `state` - set to 'in_progress' to start the task, 'completed' to finish it, 'verified' to assert that it was successful, 'abandoned' to give up on it permanently, or `open` to stop working on it for now. * `finish` - if ALL of your tasks and subtasks have been verified or abandoned, and you're absolutely certain that you've completed your task and have tested your work, use the finish action to stop working. -You MUST take time to think in between read, write, run, kill, browse, and recall actions--do this with the `message` action. +You MUST take time to think in between read, write, run, and browse actions--do this with the `message` action. You should never act twice in a row without thinking. But if your last several actions are all `message` actions, you should consider taking a different action. @@ -106,7 +101,6 @@ def get_hint(latest_action_id: str) -> str: """Returns action type hint based on given action_id""" - hints = { '': "You haven't taken any actions yet. 
Start by using `ls` to check out what files you're working with.", ActionType.RUN: 'You should think about the command you just ran, what output it gave, and how that affects your plan.', @@ -114,7 +108,6 @@ def get_hint(latest_action_id: str) -> str: ActionType.WRITE: 'You just changed a file. You should think about how it affects your plan.', ActionType.BROWSE: 'You should think about the page you just visited, and what you learned from it.', ActionType.MESSAGE: "Look at your last thought in the history above. What does it suggest? Don't think anymore--take action.", - ActionType.RECALL: 'You should think about the information you just recalled, and how it should affect your plan.', ActionType.ADD_TASK: 'You should think about the next action to take.', ActionType.MODIFY_TASK: 'You should think about the next action to take.', ActionType.SUMMARIZE: '', @@ -124,8 +117,7 @@ def get_hint(latest_action_id: str) -> str: def get_prompt(state: State) -> str: - """ - Gets the prompt for the planner agent. + """Gets the prompt for the planner agent. 
Formatted with the most recent action-observation pairs, current task, and hint based on last action Parameters: @@ -134,19 +126,32 @@ def get_prompt(state: State) -> str: Returns: - str: The formatted string prompt with historical values """ + max_message_chars = config.get_llm_config_from_agent( + 'PlannerAgent' + ).max_message_chars + # the plan plan_str = json.dumps(state.root_task.to_dict(), indent=2) - sub_history = state.history[-HISTORY_SIZE:] + + # the history history_dicts = [] latest_action: Action = NullAction() - for action, observation in sub_history: - if not isinstance(action, NullAction): - history_dicts.append(event_to_memory(action)) - latest_action = action - if not isinstance(observation, NullObservation): - observation_dict = event_to_memory(observation) - history_dicts.append(observation_dict) + + # retrieve the latest HISTORY_SIZE events + for event_count, event in enumerate(state.history.get_events(reverse=True)): + if event_count >= HISTORY_SIZE: + break + if latest_action == NullAction() and isinstance(event, Action): + latest_action = event + history_dicts.append(event_to_memory(event, max_message_chars)) + + # history_dicts is in reverse order, lets fix it + history_dicts.reverse() + + # and get it as a JSON string history_str = json.dumps(history_dicts, indent=2) + + # the plan status current_task = state.root_task.get_current_task() if current_task is not None: plan_status = f"You're currently working on this task:\n{current_task.goal}." @@ -154,9 +159,15 @@ def get_prompt(state: State) -> str: plan_status += "\nIf it's not achievable AND verifiable with a SINGLE action, you MUST break it down into subtasks NOW." else: plan_status = "You're not currently working on any tasks. Your next action MUST be to mark a task as in_progress." 
- hint = get_hint(event_to_memory(latest_action).get('action', '')) + + # the hint, based on the last action + hint = get_hint(event_to_memory(latest_action, max_message_chars).get('action', '')) logger.info('HINT:\n' + hint, extra={'msg_type': 'DETAIL'}) + + # the last relevant user message (the task) task = state.get_current_user_intent() + + # finally, fill in the prompt return prompt % { 'task': task, 'plan': plan_str, @@ -167,9 +178,7 @@ def get_prompt(state: State) -> str: def parse_response(response: str) -> Action: - """ - Parses the model output to find a valid action to take - + """Parses the model output to find a valid action to take Parameters: - response (str): A response from the model that potentially contains an Action. diff --git a/config.template.toml b/config.template.toml new file mode 100644 index 000000000000..c5fc9768a011 --- /dev/null +++ b/config.template.toml @@ -0,0 +1,203 @@ +###################### OpenDevin Configuration Example ###################### +# +# All settings have default values, so you only need to uncomment and +# modify what you want to change +# The fields within each section are sorted in alphabetical order. 
+# +############################################################################## + +#################################### Core #################################### +# General core configurations +############################################################################## +[core] +# API key for E2B +#e2b_api_key = "" + +# Base path for the workspace +workspace_base = "./workspace" + +# Cache directory path +#cache_dir = "/tmp/cache" + +# Debugging enabled +#debug = false + +# Disable color in terminal output +#disable_color = false + +# Enable auto linting after editing +#enable_auto_lint = false + +# Enable saving and restoring the session when run from CLI +#enable_cli_session = false + +# File store path +#file_store_path = "/tmp/file_store" + +# File store type +#file_store = "memory" + +# List of allowed file extensions for uploads +#file_uploads_allowed_extensions = [".*"] + +# Maximum file size for uploads, in megabytes +#file_uploads_max_file_size_mb = 0 + +# Maximum budget per task, 0.0 means no limit +#max_budget_per_task = 0.0 + +# Maximum number of iterations +#max_iterations = 100 + +# Path to mount the workspace in the sandbox +#workspace_mount_path_in_sandbox = "/workspace" + +# Path to mount the workspace +#workspace_mount_path = "" + +# Path to rewrite the workspace mount path to +#workspace_mount_rewrite = "" + +# Persist the sandbox +persist_sandbox = false + +# Run as devin +#run_as_devin = true + +# Runtime environment +#runtime = "server" + +# SSH hostname for the sandbox +#ssh_hostname = "localhost" + +# SSH password for the sandbox +#ssh_password = "" + +# SSH port for the sandbox +#ssh_port = 63710 + +# Use host network +#use_host_network = false + +# Name of the default agent +#default_agent = "CodeActAgent" + +#################################### LLM ##################################### +# Configuration for LLM models (group name starts with 'llm') +# use 'llm' for the default LLM config 
+############################################################################## +[llm] +# AWS access key ID +#aws_access_key_id = "" + +# AWS region name +#aws_region_name = "" + +# AWS secret access key +#aws_secret_access_key = "" + +# API key to use +api_key = "your-api-key" + +# API base URL +#base_url = "" + +# API version +#api_version = "" + +# Cost per input token +#input_cost_per_token = 0.0 + +# Cost per output token +#output_cost_per_token = 0.0 + +# Custom LLM provider +#custom_llm_provider = "" + +# Embedding API base URL +#embedding_base_url = "" + +# Embedding deployment name +#embedding_deployment_name = "" + +# Embedding model to use +embedding_model = "" + +# Maximum number of characters in an observation's content +#max_message_chars = 10000 + +# Maximum number of input tokens +#max_input_tokens = 0 + +# Maximum number of output tokens +#max_output_tokens = 0 + +# Model to use +model = "gpt-4o" + +# Number of retries to attempt +#num_retries = 5 + +# Retry maximum wait time +#retry_max_wait = 60 + +# Retry minimum wait time +#retry_min_wait = 3 + +# Temperature for the API +#temperature = 0.0 + +# Timeout for the API +#timeout = 0 + +# Top p for the API +#top_p = 0.5 + +[llm.gpt3] +# API key to use +api_key = "your-api-key" + +# Model to use +model = "gpt-3.5" + +#################################### Agent ################################### +# Configuration for agents (group name starts with 'agent') +# Use 'agent' for the default agent config +# otherwise, group name must be `agent.` (case-sensitive), e.g. 
+# agent.CodeActAgent +############################################################################## +[agent] +# Memory enabled +#memory_enabled = false + +# Memory maximum threads +#memory_max_threads = 2 + +# LLM config group to use +#llm_config = 'llm' + +[agent.RepoExplorerAgent] +# Example: use a cheaper model for RepoExplorerAgent to reduce cost, especially +# useful when an agent doesn't demand high quality but uses a lot of tokens +llm_config = 'gpt3' + +#################################### Sandbox ################################### +# Configuration for the sandbox +############################################################################## +[sandbox] +# Sandbox timeout in seconds +#timeout = 120 + +# Sandbox type (ssh, e2b, local) +#box_type = "ssh" + +# Sandbox user ID +#user_id = 1000 + +# Container image to use for the sandbox +#container_image = "ghcr.io/opendevin/sandbox:main" + +#################################### Eval #################################### +# Configuration for the evaluation, please refer to the specific evaluation +# plugin for the available options +############################################################################## diff --git a/containers/README.md b/containers/README.md index 1f66cbd6cfad..526bfee9381d 100644 --- a/containers/README.md +++ b/containers/README.md @@ -1,11 +1,12 @@ # Docker Containers + Each folder here contains a Dockerfile, and a config.sh describing how to build -the image and where to push it. These are images are built and pushed in GitHub Actions +the images and where to push them. These images are built and pushed in GitHub Actions by the `ghcr.yml` workflow. ## Building Manually -``` +```bash docker build -f containers/app/Dockerfile -t opendevin . docker build -f containers/sandbox/Dockerfile -t sandbox . 
``` diff --git a/containers/app/Dockerfile b/containers/app/Dockerfile index ffcea9f8e5cd..3a883b19f999 100644 --- a/containers/app/Dockerfile +++ b/containers/app/Dockerfile @@ -10,7 +10,7 @@ RUN npm ci COPY ./frontend ./ RUN npm run make-i18n && npm run build -FROM python:3.12-slim as backend-builder +FROM python:3.12.3-slim as backend-builder WORKDIR /app ENV PYTHONPATH '/app' @@ -28,7 +28,7 @@ COPY ./pyproject.toml ./poetry.lock ./ RUN touch README.md RUN poetry install --without evaluation --no-root && rm -rf $POETRY_CACHE_DIR -FROM python:3.12-slim as runtime +FROM python:3.12.3-slim as runtime WORKDIR /app diff --git a/containers/app/entrypoint.sh b/containers/app/entrypoint.sh index c6fb252f03be..565925456fab 100755 --- a/containers/app/entrypoint.sh +++ b/containers/app/entrypoint.sh @@ -50,6 +50,7 @@ else groupadd -g $DOCKER_SOCKET_GID docker fi + mkdir -p /home/enduser/.cache/huggingface/hub/ mkdir -p /home/enduser/.cache/ms-playwright/ mv /home/opendevin/.cache/ms-playwright/ /home/enduser/.cache/ diff --git a/containers/build.sh b/containers/build.sh index 66a3a31c414c..1ac069d95199 100755 --- a/containers/build.sh +++ b/containers/build.sh @@ -3,65 +3,68 @@ set -eo pipefail image_name=$1 org_name=$2 -push=0 -if [[ $3 == "--push" ]]; then - push=1 -fi +platform=$3 -echo -e "Building: $image_name" +echo "Building: $image_name for platform: $platform" tags=() OPEN_DEVIN_BUILD_VERSION="dev" -cache_tag_base="buildcache" -cache_tag="$cache_tag_base" - if [[ -n $GITHUB_REF_NAME ]]; then # check if ref name is a version number if [[ $GITHUB_REF_NAME =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - major_version=$(echo $GITHUB_REF_NAME | cut -d. -f1) - minor_version=$(echo $GITHUB_REF_NAME | cut -d. -f1,2) - tags+=($major_version $minor_version) + major_version=$(echo "$GITHUB_REF_NAME" | cut -d. -f1) + minor_version=$(echo "$GITHUB_REF_NAME" | cut -d. 
-f1,2) + tags+=("$major_version" "$minor_version") + tags+=("latest") fi - sanitized=$(echo $GITHUB_REF_NAME | sed 's/[^a-zA-Z0-9.-]\+/-/g') + sanitized=$(echo "$GITHUB_REF_NAME" | sed 's/[^a-zA-Z0-9.-]\+/-/g') OPEN_DEVIN_BUILD_VERSION=$sanitized - cache_tag+="-${sanitized}" - tags+=($sanitized) + tag=$(echo "$sanitized" | tr '[:upper:]' '[:lower:]') # lower case is required in tagging + tags+=("$tag") fi echo "Tags: ${tags[@]}" -dir=./containers/$image_name -if [ ! -f $dir/Dockerfile ]; then +if [[ "$image_name" == "opendevin" ]]; then + dir="./containers/app" +else + dir="./containers/$image_name" +fi + +if [[ ! -f "$dir/Dockerfile" ]]; then echo "No Dockerfile found" exit 1 fi -if [ ! -f $dir/config.sh ]; then +if [[ ! -f "$dir/config.sh" ]]; then echo "No config.sh found for Dockerfile" exit 1 fi -source $dir/config.sh + +source "$dir/config.sh" + if [[ -n "$org_name" ]]; then DOCKER_ORG="$org_name" fi -DOCKER_REPOSITORY=$DOCKER_REGISTRY/$DOCKER_ORG/$DOCKER_IMAGE + +DOCKER_REPOSITORY="$DOCKER_REGISTRY/$DOCKER_ORG/$DOCKER_IMAGE" DOCKER_REPOSITORY=${DOCKER_REPOSITORY,,} # lowercase echo "Repo: $DOCKER_REPOSITORY" echo "Base dir: $DOCKER_BASE_DIR" args="" -for tag in ${tags[@]}; do +for tag in "${tags[@]}"; do args+=" -t $DOCKER_REPOSITORY:$tag" done -if [[ $push -eq 1 ]]; then - args+=" --push" - args+=" --cache-to=type=registry,ref=$DOCKER_REPOSITORY:$cache_tag,mode=max" -fi + +output_image="/tmp/${image_name}_image_${platform}.tar" docker buildx build \ $args \ - --build-arg OPEN_DEVIN_BUILD_VERSION=$OPEN_DEVIN_BUILD_VERSION \ - --cache-from=type=registry,ref=$DOCKER_REPOSITORY:$cache_tag \ - --cache-from=type=registry,ref=$DOCKER_REPOSITORY:$cache_tag_base-main \ - --platform linux/amd64,linux/arm64 \ + --build-arg OPEN_DEVIN_BUILD_VERSION="$OPEN_DEVIN_BUILD_VERSION" \ + --platform linux/$platform \ --provenance=false \ - -f $dir/Dockerfile $DOCKER_BASE_DIR + -f "$dir/Dockerfile" \ + --output type=docker,dest="$output_image" \ + "$DOCKER_BASE_DIR" + +echo 
"${tags[*]}" > tags.txt diff --git a/containers/sandbox/Dockerfile b/containers/sandbox/Dockerfile index 6da20b7dfec6..9a2c8ce6a18e 100644 --- a/containers/sandbox/Dockerfile +++ b/containers/sandbox/Dockerfile @@ -16,13 +16,11 @@ RUN apt-get update && apt-get install -y \ build-essential \ openssh-server \ sudo \ - bash \ gcc \ jq \ g++ \ make \ iproute2 \ - libgl1-mesa-glx \ && rm -rf /var/lib/apt/lists/* RUN mkdir -p -m0755 /var/run/sshd @@ -30,8 +28,17 @@ RUN mkdir -p -m0755 /var/run/sshd # symlink python3 to python RUN ln -s /usr/bin/python3 /usr/bin/python -# install basic dependencies for CodeActAgent -RUN pip3 install --upgrade pip -RUN pip3 install jupyterlab notebook jupyter_kernel_gateway flake8 -# TODO: those dependencies are needed for agentskills, we should pack them in a new sandbox image -RUN pip3 install python-docx PyPDF2 python-pptx pylatexenc openai opencv-python +# ==== OpenDevin Runtime Client ==== +RUN mkdir -p /opendevin && mkdir -p /opendevin/logs && chmod 777 /opendevin/logs +RUN wget --progress=bar:force -O Miniforge3.sh "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh" +RUN bash Miniforge3.sh -b -p /opendevin/miniforge3 +RUN chmod -R g+w /opendevin/miniforge3 +RUN bash -c ". 
/opendevin/miniforge3/etc/profile.d/conda.sh && conda config --set changeps1 False && conda config --append channels conda-forge" +RUN echo "" > /opendevin/bash.bashrc +RUN rm -f Miniforge3.sh + +# - agentskills dependencies +RUN /opendevin/miniforge3/bin/pip install --upgrade pip +RUN /opendevin/miniforge3/bin/pip install jupyterlab notebook jupyter_kernel_gateway flake8 +RUN /opendevin/miniforge3/bin/pip install python-docx PyPDF2 python-pptx pylatexenc openai +RUN /opendevin/miniforge3/bin/pip install python-dotenv toml termcolor pydantic python-docx pyyaml docker pexpect tenacity e2b browsergym minio diff --git a/docs/docusaurus.config.ts b/docs/docusaurus.config.ts index 60d685c5289d..038918042637 100644 --- a/docs/docusaurus.config.ts +++ b/docs/docusaurus.config.ts @@ -8,8 +8,8 @@ const config: Config = { favicon: "img/logo.png", // Set the production url of your site here - url: "https://OpenDevin.github.io", - baseUrl: "/OpenDevin/", + url: "https://docs.all-hands.dev", + baseUrl: "/", // GitHub pages deployment config. organizationName: "OpenDevin", @@ -23,8 +23,13 @@ const config: Config = { // useful metadata like html lang. For example, if your site is Chinese, you // may want to replace "en" with "zh-Hans". 
i18n: { - defaultLocale: "en", - locales: ["en"], + defaultLocale: 'en', + locales: ['en', 'fr', 'zh-Hans'], + localeConfigs: { + en: { + htmlLang: 'en-GB', + }, + }, }, presets: [ @@ -51,7 +56,6 @@ const config: Config = { } satisfies Preset.Options, ], ], - themeConfig: { image: "img/docusaurus.png", navbar: { @@ -79,6 +83,10 @@ const config: Config = { label: "GitHub", position: "right", }, + { + type: 'localeDropdown', + position: 'left', + }, ], }, prism: { diff --git a/docs/i18n/fr/code.json b/docs/i18n/fr/code.json new file mode 100644 index 000000000000..b43792f7f80c --- /dev/null +++ b/docs/i18n/fr/code.json @@ -0,0 +1,406 @@ +{ + "footer.title": { + "message": "OpenDevin" + }, + "footer.docs": { + "message": "Documents" + }, + "footer.community": { + "message": "Communauté" + }, + "footer.copyright": { + "message": "© {year} OpenDevin" + }, + "faq.title": { + "message": "Questions Fréquemment Posées", + "description": "FAQ Title" + }, + "faq.description": { + "message": "Questions Fréquemment Posées" + }, + "faq.section.title.1": { + "message": "Qu'est-ce qu'OpenDevin ?", + "description": "First Section Title" + }, + "faq.section.highlight": { + "message": "OpenDevin", + "description": "Highlight Text" + }, + "faq.section.description.1": { + "message": "est un ingénieur logiciel autonome qui peut résoudre des tâches d'ingénierie logicielle et de navigation web à tout moment. Il peut exécuter des requêtes en sciences des données, telles que \"Trouver le nombre de demandes de pull à l'repository OpenDevin dans les derniers mois\", et des tâches d'ingénierie logicielle, comme \"Veuillez ajouter des tests à ce fichier et vérifier si tous les tests passent. 
Si ce n'est pas le cas, réparez le fichier.\"", + "description": "Description for OpenDevin" + }, + "faq.section.description.2": { + "message": "De plus, OpenDevin est une plateforme et communauté pour les développeurs d'agents qui souhaitent tester et évaluer de nouveaux agents.", + "description": "Further Description for OpenDevin" + }, + "faq.section.title.2": { + "message": "Support", + "description": "Support Section Title" + }, + "faq.section.support.answer": { + "message": "Si vous rencontrez un problème que d'autres utilisateurs peuvent également avoir, merci de le signaler sur {githubLink}. Si vous avez des difficultés à l'installation ou des questions générales, rejoignez-vous sur {discordLink} ou {slackLink}.", + "description": "Support Answer" + }, + "faq.section.title.3": { + "message": "Comment résoudre un problème sur GitHub avec OpenDevin ?", + "description": "GitHub Issue Section Title" + }, + "faq.section.github.steps.intro": { + "message": "Pour résoudre un problème sur GitHub en utilisant OpenDevin, envoyez une commande à OpenDevin demandant qu'il suit des étapes comme les suivantes :", + "description": "GitHub Steps Introduction" + }, + "faq.section.github.step1": { + "message": "Lisez l'issue https://github.com/OpenDevin/OpenDevin/issues/1611", + "description": "GitHub Step 1" + }, + "faq.section.github.step2": { + "message": "Cloner le dépôt et vérifier une nouvelle branche", + "description": "GitHub Step 2" + }, + "faq.section.github.step3": { + "message": "Sur la base des instructions dans la description de l'issue, modifiez les fichiers pour résoudre le problème", + "description": "GitHub Step 3" + }, + "faq.section.github.step4": { + "message": "Pousser le résultat à GitHub en utilisant la variable d'environnement GITHUB_TOKEN", + "description": "GitHub Step 4" + }, + "faq.section.github.step5": { + "message": "Dites-moi le lien que je dois utiliser pour envoyer une demande de pull", + "description": "GitHub Step 5" + }, + 
"faq.section.github.steps.preRun": { + "message": "Avant de lancer OpenDevin, vous pouvez faire :", + "description": "GitHub Steps Pre-Run" + }, + "faq.section.github.steps.tokenInfo": { + "message": "où XXX est un jeton GitHub que vous avez créé et qui a les autorisations pour pousser dans le dépôt OpenDevin. Si vous n'avez pas d'autorisations de modification du dépôt OpenDevin, vous devrez peut-être changer cela en :", + "description": "GitHub Steps Token Info" + }, + "faq.section.github.steps.usernameInfo": { + "message": "où USERNAME est votre nom GitHub.", + "description": "GitHub Steps Username Info" + }, + "faq.section.title.4": { + "message": "Comment OpenDevin est-il différent de Devin ?", + "description": "Devin Section Title" + }, + "faq.section.devin.linkText": { + "message": "Devin", + "description": "Devin Link Text" + }, + "faq.section.devin.description": { + "message": "est un produit commercial par Cognition Inc., qui a servi d'inspiration initiale pour OpenDevin. Les deux visent à bien faire le travail d'ingénierie logicielle, mais vous pouvez télécharger, utiliser et modifier OpenDevin, tandis que Devin peut être utilisé uniquement via le site de Cognition. De plus, OpenDevin a évolué au-delà de l'inspiration initiale, et est maintenant un écosystème communautaire pour le développement d'agents en général, et nous serions ravis de vous voir rejoindre et", + "description": "Devin Description" + }, + "faq.section.devin.contribute": { + "message": "contribuer", + "description": "Contribute Link" + }, + "faq.section.title.5": { + "message": "Comment OpenDevin est-il différent de ChatGPT ?", + "description": "ChatGPT Section Title" + }, + "faq.section.chatgpt.description": { + "message": "ChatGPT vous pouvez accéder en ligne, il ne se connecte pas aux fichiers locaux et ses capacités d'exécution du code sont limitées. 
Alors qu'il peut écrire du code, mais c'est difficile à tester ou à exécuter.", + "description": "ChatGPT Description" + }, + "homepage.description": { + "message": "Génération d'code AI pour l'ingénierie logicielle.", + "description": "The homepage description" + }, + "homepage.getStarted": { + "message": "Commencer" + }, + "welcome.message": { + "message": "Bienvenue à OpenDevin, un système d'IA autonome ingénieur logiciel capable d'exécuter des tâches d'ingénierie complexes et de collaborer activement avec les utilisateurs sur les projets de développement logiciel." + }, + "theme.ErrorPageContent.title": { + "message": "Cette page a planté.", + "description": "The title of the fallback page when the page crashed" + }, + "theme.BackToTopButton.buttonAriaLabel": { + "message": "Retourner en haut de la page", + "description": "The ARIA label for the back to top button" + }, + "theme.blog.archive.title": { + "message": "Archives", + "description": "The page & hero title of the blog archive page" + }, + "theme.blog.archive.description": { + "message": "Archives", + "description": "The page & hero description of the blog archive page" + }, + "theme.blog.paginator.navAriaLabel": { + "message": "Pagination des listes d'articles du blog", + "description": "The ARIA label for the blog pagination" + }, + "theme.blog.paginator.newerEntries": { + "message": "Nouvelles entrées", + "description": "The label used to navigate to the newer blog posts page (previous page)" + }, + "theme.blog.paginator.olderEntries": { + "message": "Anciennes entrées", + "description": "The label used to navigate to the older blog posts page (next page)" + }, + "theme.blog.post.paginator.navAriaLabel": { + "message": "Pagination des articles du blog", + "description": "The ARIA label for the blog posts pagination" + }, + "theme.blog.post.paginator.newerPost": { + "message": "Article plus récent", + "description": "The blog post button label to navigate to the newer/previous post" + }, + 
"theme.blog.post.paginator.olderPost": { + "message": "Article plus ancien", + "description": "The blog post button label to navigate to the older/next post" + }, + "theme.blog.post.plurals": { + "message": "Un article|{count} articles", + "description": "Pluralized label for \"{count} posts\". Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.blog.tagTitle": { + "message": "{nPosts} tags avec « {tagName} »", + "description": "The title of the page for a blog tag" + }, + "theme.tags.tagsPageLink": { + "message": "Voir tous les tags", + "description": "The label of the link targeting the tag list page" + }, + "theme.colorToggle.ariaLabel": { + "message": "Basculer entre le mode sombre et clair (actuellement {mode})", + "description": "The ARIA label for the navbar color mode toggle" + }, + "theme.colorToggle.ariaLabel.mode.dark": { + "message": "mode sombre", + "description": "The name for the dark color mode" + }, + "theme.colorToggle.ariaLabel.mode.light": { + "message": "mode clair", + "description": "The name for the light color mode" + }, + "theme.docs.breadcrumbs.navAriaLabel": { + "message": "Bouton de navigation des liens de la page", + "description": "The ARIA label for the breadcrumbs" + }, + "theme.docs.DocCard.categoryDescription.plurals": { + "message": "1 élément|{count} éléments", + "description": "The default description for a category card in the generated index about how many items this category includes" + }, + "theme.docs.paginator.navAriaLabel": { + "message": "Pages de documentation", + "description": "The ARIA label for the docs pagination" + }, + "theme.docs.paginator.previous": { + "message": "Précédent", + "description": "The label used to navigate to the previous doc" + }, + "theme.docs.paginator.next": { + "message": "Suivant", + "description": "The label used to navigate to the next doc" + }, + 
"theme.docs.tagDocListPageTitle.nDocsTagged": { + "message": "Un document tagué|{count} documents tagués", + "description": "Pluralized label for \"{count} docs tagged\". Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.docs.tagDocListPageTitle": { + "message": "{nDocsTagged} avec \"{tagName}\"", + "description": "The title of the page for a docs tag" + }, + "theme.docs.versionBadge.label": { + "message": "Version: {versionLabel}" + }, + "theme.docs.versions.unreleasedVersionLabel": { + "message": "Ceci est la documentation de la prochaine version {versionLabel} de {siteTitle}.", + "description": "The label used to tell the user that he's browsing an unreleased doc version" + }, + "theme.docs.versions.unmaintainedVersionLabel": { + "message": "Ceci est la documentation de {siteTitle} {versionLabel}, qui n'est plus activement maintenue.", + "description": "The label used to tell the user that he's browsing an unmaintained doc version" + }, + "theme.docs.versions.latestVersionSuggestionLabel": { + "message": "Pour une documentation à jour, consultez la {latestVersionLink} ({versionLabel}).", + "description": "The label used to tell the user to check the latest version" + }, + "theme.docs.versions.latestVersionLinkLabel": { + "message": "dernière version", + "description": "The label used for the latest version suggestion link label" + }, + "theme.common.editThisPage": { + "message": "Éditer cette page", + "description": "The link label to edit the current page" + }, + "theme.common.headingLinkTitle": { + "message": "Lien direct vers {heading}", + "description": "Title for link to heading" + }, + "theme.lastUpdated.atDate": { + "message": " le {date}", + "description": "The words used to describe on which date a page has been last updated" + }, + "theme.lastUpdated.byUser": { + "message": " par {user}", + "description": "The words used to 
describe by who the page has been last updated" + }, + "theme.lastUpdated.lastUpdatedAtBy": { + "message": "Dernière mise à jour{atDate}{byUser}", + "description": "The sentence used to display when a page has been last updated, and by who" + }, + "theme.navbar.mobileVersionsDropdown.label": { + "message": "Versions", + "description": "The label for the navbar versions dropdown on mobile view" + }, + "theme.NotFound.title": { + "message": "Page introuvable", + "description": "The title of the 404 page" + }, + "theme.tags.tagsListLabel": { + "message": "Tags :", + "description": "The label alongside a tag list" + }, + "theme.admonition.caution": { + "message": "prudence", + "description": "The default label used for the Caution admonition (:::caution)" + }, + "theme.admonition.danger": { + "message": "danger", + "description": "The default label used for the Danger admonition (:::danger)" + }, + "theme.admonition.info": { + "message": "information", + "description": "The default label used for the Info admonition (:::info)" + }, + "theme.admonition.note": { + "message": "remarque", + "description": "The default label used for the Note admonition (:::note)" + }, + "theme.admonition.tip": { + "message": "astuce", + "description": "The default label used for the Tip admonition (:::tip)" + }, + "theme.admonition.warning": { + "message": "prudence", + "description": "The default label used for the Warning admonition (:::warning)" + }, + "theme.AnnouncementBar.closeButtonAriaLabel": { + "message": "Fermer", + "description": "The ARIA label for close button of announcement bar" + }, + "theme.blog.sidebar.navAriaLabel": { + "message": "Navigation vers les articles récents du blog", + "description": "The ARIA label for recent posts in the blog sidebar" + }, + "theme.CodeBlock.copied": { + "message": "Copié", + "description": "The copied button label on code blocks" + }, + "theme.CodeBlock.copyButtonAriaLabel": { + "message": "Copier le code", + "description": "The ARIA label 
for copy code blocks button" + }, + "theme.CodeBlock.copy": { + "message": "Copier", + "description": "The copy button label on code blocks" + }, + "theme.CodeBlock.wordWrapToggle": { + "message": "Activer/désactiver le retour à la ligne", + "description": "The title attribute for toggle word wrapping button of code block lines" + }, + "theme.DocSidebarItem.expandCategoryAriaLabel": { + "message": "Développer la catégorie '{label}' de la barre latérale", + "description": "The ARIA label to expand the sidebar category" + }, + "theme.DocSidebarItem.collapseCategoryAriaLabel": { + "message": "Réduire la catégorie '{label}' de la barre latérale", + "description": "The ARIA label to collapse the sidebar category" + }, + "theme.NavBar.navAriaLabel": { + "message": "Principale", + "description": "The ARIA label for the main navigation" + }, + "theme.navbar.mobileLanguageDropdown.label": { + "message": "Langues", + "description": "The label for the mobile language switcher dropdown" + }, + "theme.NotFound.p1": { + "message": "Nous n'avons pas trouvé ce que vous recherchez.", + "description": "The first paragraph of the 404 page" + }, + "theme.NotFound.p2": { + "message": "Veuillez contacter le propriétaire du site qui vous a lié à l'URL d'origine et lui faire savoir que son lien est cassé.", + "description": "The 2nd paragraph of the 404 page" + }, + "theme.TOCCollapsible.toggleButtonLabel": { + "message": "Sur cette page", + "description": "The label used by the button on the collapsible TOC component" + }, + "theme.blog.post.readMore": { + "message": "Lire plus", + "description": "The label used in blog post item excerpts to link to full blog posts" + }, + "theme.blog.post.readMoreLabel": { + "message": "En savoir plus sur {title}", + "description": "The ARIA label for the link to full blog posts from excerpts" + }, + "theme.blog.post.readingTime.plurals": { + "message": "Une minute de lecture|{readingTime} minutes de lecture", + "description": "Pluralized label for 
\"{readingTime} min read\". Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.docs.breadcrumbs.home": { + "message": "Page d'accueil", + "description": "The ARIA label for the home page in the breadcrumbs" + }, + "theme.docs.sidebar.collapseButtonTitle": { + "message": "Réduire le menu latéral", + "description": "The title attribute for collapse button of doc sidebar" + }, + "theme.docs.sidebar.collapseButtonAriaLabel": { + "message": "Réduire le menu latéral", + "description": "The title attribute for collapse button of doc sidebar" + }, + "theme.docs.sidebar.navAriaLabel": { + "message": "Barre de navigation latérale des docs", + "description": "The ARIA label for the sidebar navigation" + }, + "theme.docs.sidebar.closeSidebarButtonAriaLabel": { + "message": "Fermer la barre de navigation", + "description": "The ARIA label for close button of mobile sidebar" + }, + "theme.navbar.mobileSidebarSecondaryMenu.backButtonLabel": { + "message": "← Retour au menu principal", + "description": "The label of the back button to return to main menu, inside the mobile navbar sidebar secondary menu (notably used to display the docs sidebar)" + }, + "theme.docs.sidebar.toggleSidebarButtonAriaLabel": { + "message": "Ouvrir/fermer la barre de navigation", + "description": "The ARIA label for hamburger menu button of mobile navigation" + }, + "theme.docs.sidebar.expandButtonTitle": { + "message": "Déplier le menu latéral", + "description": "The ARIA label and title attribute for expand button of doc sidebar" + }, + "theme.docs.sidebar.expandButtonAriaLabel": { + "message": "Déplier le menu latéral", + "description": "The ARIA label and title attribute for expand button of doc sidebar" + }, + "theme.ErrorPageContent.tryAgain": { + "message": "Réessayer", + "description": "The label of the button to try again rendering when the React error boundary 
captures an error" + }, + "theme.common.skipToMainContent": { + "message": "Aller directement au contenu principal", + "description": "The skip to content label used for accessibility, allowing to rapidly navigate to main content with keyboard tab/enter navigation" + }, + "theme.tags.tagsPageTitle": { + "message": "Tags", + "description": "The title of the tag list page" + }, + "theme.unlistedContent.title": { + "message": "Page non répertoriée", + "description": "The unlisted content banner title" + }, + "theme.unlistedContent.message": { + "message": "Cette page n'est pas répertoriée. Les moteurs de recherche ne l'indexeront pas, et seuls les utilisateurs ayant un lien direct peuvent y accéder.", + "description": "The unlisted content banner message" + } +} diff --git a/docs/i18n/fr/docusaurus-plugin-content-blog/options.json b/docs/i18n/fr/docusaurus-plugin-content-blog/options.json new file mode 100644 index 000000000000..0eee9a2e7b72 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-blog/options.json @@ -0,0 +1,14 @@ +{ + "title": { + "message": "Blog", + "description": "The title for the blog used in SEO" + }, + "description": { + "message": "Blog", + "description": "The description for the blog used in SEO" + }, + "sidebar.title": { + "message": "Articles récents", + "description": "The label for the left sidebar" + } +} diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current.json b/docs/i18n/fr/docusaurus-plugin-content-docs/current.json new file mode 100644 index 000000000000..eeeb46120b72 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current.json @@ -0,0 +1,18 @@ +{ + "version.label": { + "message": "Next", + "description": "The label for version current" + }, + "sidebar.docsSidebar.category.🤖 Backends LLM": { + "message": "🤖 Backends LLM", + "description": "The label for category 🤖 Backends LLM in sidebar docsSidebar" + }, + "sidebar.docsSidebar.category.🚧 Dépannage": { + "message": "🚧 Dépannage", + "description": "The 
label for category 🚧 Dépannage in sidebar docsSidebar" + }, + "sidebar.apiSidebar.category.Backend": { + "message": "Backend", + "description": "The label for category Backend in sidebar apiSidebar" + } +} diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/python/python.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/python/python.md new file mode 100644 index 000000000000..a154eec6f3f5 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/python/python.md @@ -0,0 +1,3 @@ +# Documentation Python + +Les documents apparaîtront ici après le déploiement. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/python/sidebar.json b/docs/i18n/fr/docusaurus-plugin-content-docs/current/python/sidebar.json new file mode 100644 index 000000000000..53257aefea85 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/python/sidebar.json @@ -0,0 +1,5 @@ +{ + "items": ["python/python"], + "label": "Backend", + "type": "category" +} diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/about.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/about.md new file mode 100644 index 000000000000..b21435b52290 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/about.md @@ -0,0 +1,53 @@ +--- +sidebar_position: 7 +--- + +# 📚 Divers {#misc} + +## ⭐️ Stratégie de Recherche {#research-strategy} + +La réalisation d'une réplication complète des applications de production avec les LLM est une entreprise complexe. Notre stratégie implique : + +1. **Recherche Technique de Base :** Se concentrer sur la recherche fondamentale pour comprendre et améliorer les aspects techniques de la génération et de la gestion de code. +2. **Compétences Spécialisées :** Améliorer l'efficacité des composants de base grâce à la curation des données, aux méthodes de formation, et plus encore. +3. 
**Planification des Tâches :** Développer des capacités pour la détection de bogues, la gestion du code source et l'optimisation. +4. **Évaluation :** Établir des métriques d'évaluation complètes pour mieux comprendre et améliorer nos modèles. + +## 🚧 Agent Par Défaut {#default-agent} + +- Notre agent par défaut est actuellement le CodeActAgent, capable de générer du code et de gérer des fichiers. Nous travaillons sur d'autres implémentations d'agents, y compris [SWE Agent](https://swe-agent.com/). Vous pouvez [lire à propos de notre ensemble actuel d'agents ici](./agents). + +## 🤝 Comment Contribuer {#how-to-contribute} + +OpenDevin est un projet communautaire, et nous accueillons les contributions de tout le monde. Que vous soyez développeur, chercheur, ou simplement enthousiaste à l'idée de faire progresser le domaine de l'ingénierie logicielle avec l'IA, il existe de nombreuses façons de vous impliquer : + +- **Contributions de Code :** Aidez-nous à développer les fonctionnalités de base, l'interface frontend ou les solutions de sandboxing. +- **Recherche et Évaluation :** Contribuez à notre compréhension des LLM en ingénierie logicielle, participez à l'évaluation des modèles ou suggérez des améliorations. +- **Retour d'Information et Tests :** Utilisez l'ensemble d'outils OpenDevin, signalez des bogues, suggérez des fonctionnalités ou fournissez des retours sur l'ergonomie. + +Pour plus de détails, veuillez consulter [ce document](https://github.com/OpenDevin/OpenDevin/blob/main/CONTRIBUTING.md). + +## 🤖 Rejoignez Notre Communauté {#join-our-community} + +Nous avons maintenant à la fois un espace de travail Slack pour la collaboration sur la construction d'OpenDevin et un serveur Discord pour discuter de tout ce qui est lié, par exemple, à ce projet, aux LLM, aux agents, etc. 
+ +- [Espace de travail Slack](https://join.slack.com/t/opendevin/shared_invite/zt-2jsrl32uf-fTeeFjNyNYxqSZt5NPY3fA) +- [Serveur Discord](https://discord.gg/ESHStjSjD4) + +Si vous souhaitez contribuer, n'hésitez pas à rejoindre notre communauté. Simplifions l'ingénierie logicielle ensemble ! + +🐚 **Codez moins, créez plus avec OpenDevin.** + +[![Star History Chart](https://api.star-history.com/svg?repos=OpenDevin/OpenDevin&type=Date)](https://star-history.com/#OpenDevin/OpenDevin&Date) + +## 🛠️ Construit Avec {#built-with} + +OpenDevin est construit en utilisant une combinaison de cadres et de bibliothèques puissants, offrant une base robuste pour son développement. Voici les technologies clés utilisées dans le projet : + +![FastAPI](https://img.shields.io/badge/FastAPI-black?style=for-the-badge) ![uvicorn](https://img.shields.io/badge/uvicorn-black?style=for-the-badge) ![LiteLLM](https://img.shields.io/badge/LiteLLM-black?style=for-the-badge) ![Docker](https://img.shields.io/badge/Docker-black?style=for-the-badge) ![Ruff](https://img.shields.io/badge/Ruff-black?style=for-the-badge) ![MyPy](https://img.shields.io/badge/MyPy-black?style=for-the-badge) ![LlamaIndex](https://img.shields.io/badge/LlamaIndex-black?style=for-the-badge) ![React](https://img.shields.io/badge/React-black?style=for-the-badge) + +Veuillez noter que la sélection de ces technologies est en cours, et que des technologies supplémentaires peuvent être ajoutées ou des existantes supprimées au fur et à mesure de l'évolution du projet. Nous nous efforçons d'adopter les outils les plus adaptés et efficaces pour améliorer les capacités d'OpenDevin. + +## 📜 Licence {#license} + +Distribué sous la licence MIT. Voir [notre licence](https://github.com/OpenDevin/OpenDevin/blob/main/LICENSE) pour plus d'informations. 
diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/agents.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/agents.md new file mode 100644 index 000000000000..48d90969add2 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/agents.md @@ -0,0 +1,134 @@ +--- +sidebar_position: 3 +--- + +# 🧠 Agents et Capacités + +## Agent CodeAct + +### Description + +Cet agent implémente l'idée CodeAct ([article](https://arxiv.org/abs/2402.01030), [tweet](https://twitter.com/xingyaow_/status/1754556835703751087)) qui consolide les **act**ions des agents LLM en un espace d'action **code** unifié pour à la fois la _simplicité_ et la _performance_ (voir article pour plus de détails). + +L'idée conceptuelle est illustrée ci-dessous. À chaque tour, l'agent peut : + +1. **Converse** : Communiquer avec les humains en langage naturel pour demander des clarifications, des confirmations, etc. +2. **CodeAct** : Choisir d'accomplir la tâche en exécutant du code + +- Exécuter toute commande `bash` Linux valide +- Exécuter tout code `Python` valide avec [un interpréteur Python interactif](https://ipython.org/). Cela est simulé à travers la commande `bash`, voir le système de plugin ci-dessous pour plus de détails. + +![image](https://github.com/OpenDevin/OpenDevin/assets/38853559/92b622e3-72ad-4a61-8f41-8c040b6d5fb3) + +### Système de Plugin + +Pour rendre l'agent CodeAct plus puissant avec seulement l'accès à l'espace d'action `bash`, l'agent CodeAct exploite le système de plugins d'OpenDevin: + +- [Plugin Jupyter](https://github.com/OpenDevin/OpenDevin/tree/main/opendevin/runtime/plugins/jupyter) : pour l'exécution d'IPython via la commande bash +- [Plugin outil agent SWE](https://github.com/OpenDevin/OpenDevin/tree/main/opendevin/runtime/plugins/swe_agent_commands) : Outils de ligne de commande bash puissants pour les tâches de développement logiciel introduits par [swe-agent](https://github.com/princeton-nlp/swe-agent). 
+ +### Démonstration + +https://github.com/OpenDevin/OpenDevin/assets/38853559/f592a192-e86c-4f48-ad31-d69282d5f6ac + +_Exemple de CodeActAgent avec `gpt-4-turbo-2024-04-09` effectuant une tâche de science des données (régression linéaire)_ + +### Actions + +`Action`, +`CmdRunAction`, +`IPythonRunCellAction`, +`AgentEchoAction`, +`AgentFinishAction`, +`AgentTalkAction` + +### Observations + +`CmdOutputObservation`, +`IPythonRunCellObservation`, +`AgentMessageObservation`, +`UserMessageObservation` + +### Méthodes + +| Méthode | Description | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| `__init__` | Initialise un agent avec `llm` et une liste de messages `list[Mapping[str, str]]` | +| `step` | Effectue une étape en utilisant l'agent CodeAct. Cela inclut la collecte d'informations sur les étapes précédentes et invite le modèle à exécuter une commande. | + +### En cours de réalisation & prochaine étape + +- [ ] Support de la navigation sur le web +- [ ] Compléter le workflow pour l'agent CodeAct afin de soumettre des PRs Github + +## Agent Monologue + +### Description + +L'agent Monologue utilise la mémoire à long terme et à court terme pour accomplir des tâches. +La mémoire à long terme est stockée en tant qu'objet LongTermMemory et le modèle l'utilise pour rechercher des exemples du passé. +La mémoire à court terme est stockée en tant qu'objet Monologue et le modèle peut la condenser si nécessaire. 
+ +### Actions + +`Action`, +`NullAction`, +`CmdRunAction`, +`FileWriteAction`, +`FileReadAction`, +`BrowseURLAction`, +`GithubPushAction`, +`AgentThinkAction` + +### Observations + +`Observation`, +`NullObservation`, +`CmdOutputObservation`, +`FileReadObservation`, +`BrowserOutputObservation` + +### Méthodes + +| Méthode | Description | +| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | +| `__init__` | Initialise l'agent avec une mémoire à long terme et un monologue interne | +| `_add_event` | Ajoute des événements au monologue de l'agent et condense avec un résumé automatiquement si le monologue est trop long | +| `_initialize` | Utilise la liste `INITIAL_THOUGHTS` pour donner à l'agent un contexte pour ses capacités et comment naviguer dans le `/workspace` | +| `step` | Modifie l'état actuel en ajoutant les actions et observations les plus récentes, puis invite le modèle à réfléchir à la prochaine action à entreprendre. | + +## Agent Planificateur + +### Description + +L'agent planificateur utilise une stratégie d'incitation spéciale pour créer des plans à long terme pour résoudre les problèmes. +L'agent reçoit ses paires action-observation précédentes, la tâche actuelle, et un indice basé sur la dernière action effectuée à chaque étape. 
+ +### Actions + +`NullAction`, +`CmdRunAction`, +`BrowseURLAction`, +`GithubPushAction`, +`FileReadAction`, +`FileWriteAction`, +`AgentThinkAction`, +`AgentFinishAction`, +`AgentSummarizeAction`, +`AddTaskAction`, +`ModifyTaskAction`, + +### Observations + +`Observation`, +`NullObservation`, +`CmdOutputObservation`, +`FileReadObservation`, +`BrowserOutputObservation` + +### Méthodes + +| Méthode | Description | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `__init__` | Initialise un agent avec `llm` | +| `step` | Vérifie si l'étape actuelle est terminée, retourne `AgentFinishAction` si oui. Sinon, crée une incitation de planification et l'envoie au modèle pour inférence, en ajoutant le résultat comme prochaine action. | diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/architecture.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/architecture.mdx new file mode 100644 index 000000000000..8b986ed8cd99 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/architecture.mdx @@ -0,0 +1,50 @@ +--- +sidebar_position: 4 +--- + +# 🏛️ Aperçu de l'Architecture Système + +Voici un aperçu de haut niveau de l'architecture du système. Le système est divisé en deux composants principaux : le frontend et le backend. Le frontend est responsable de la gestion des interactions avec l'utilisateur et de l'affichage des résultats. Le backend est responsable de la gestion de la logique métier et de l'exécution des agents. + +![system_architecture.svg](/img/system_architecture.svg) + +Cet aperçu est simplifié pour montrer les principaux composants et leurs interactions. Pour une vue plus détaillée de l'architecture du backend, consultez la section [Architecture du Backend](#backend-architecture-fr). 
+ +# Architecture du Backend {#backend-architecture-fr} + +_**Avertissement**: L'architecture du backend est en cours de développement et est sujette à modifications. Le schéma suivant montre l'architecture actuelle du backend basée sur le commit indiqué dans le pied de page du schéma._ + +![backend_architecture.svg](/img/backend_architecture.svg) + +

+ Mise à jour de ce Schéma +
+ La génération du schéma d'architecture du backend est partiellement automatisée. + Le schéma est généré à partir des annotations de type dans le code en utilisant l'outil py2puml. + Le schéma est ensuite revu manuellement, ajusté et exporté en PNG et SVG. + + ## Prérequis + + - Un environnement Python dans lequel opendevin est exécutable + (selon les instructions du fichier README.md à la racine du dépôt) + - [py2puml](https://github.com/lucsorel/py2puml) installé + +## Étapes + +1. Générez automatiquement le schéma en exécutant la commande suivante depuis la racine du dépôt : + `py2puml opendevin opendevin > docs/architecture/backend_architecture.puml` + +2. Ouvrez le fichier généré dans un éditeur PlantUML, par exemple Visual Studio Code avec l'extension PlantUML ou [PlantText](https://www.planttext.com/) + +3. Révisez le PUML généré et apportez toutes les modifications nécessaires au schéma (ajoutez les parties manquantes, corrigez les erreurs, améliorez l'agencement). + _py2puml crée le schéma à partir des annotations de type dans le code, donc les annotations de type manquantes ou incorrectes peuvent entraîner un schéma incomplet ou incorrect._ + +4. Examinez la différence entre le nouveau schéma et le précédent et vérifiez manuellement si les modifications sont correctes. + _Assurez-vous de ne pas supprimer les parties ajoutées manuellement au schéma par le passé et qui sont toujours pertinentes._ + +5. Ajoutez le hash du commit qui a été utilisé pour générer le schéma dans le pied de page du schéma. + +6. Exporte le schéma sous forme de fichiers PNG et SVG et remplacez les schémas existants dans le répertoire `docs/architecture`. Cela peut être fait avec (par exemple [PlantText](https://www.planttext.com/)) + +
+
diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/custom_sandbox_guide.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/custom_sandbox_guide.md new file mode 100644 index 000000000000..4b77b87d2d14 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/custom_sandbox_guide.md @@ -0,0 +1,107 @@ +# 💿 Comment Créer un Sandbox Docker sur Mesure + +Le sandbox par défaut OpenDevin est équipé d'une configuration ubuntu minimaliste. Votre cas d'utilisation pourrait nécessiter des logiciels installés par défaut. Cet article vous enseignera comment réaliser cela en utilisant une image docker personnalisée. + +## Configuration + +Assurez-vous de pouvoir utiliser OpenDevin en suivant la documentation [Development.md](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md). + +## Créer Votre Image Docker + +Ensuite, vous devez créer votre image docker personnalisée qui doit être basée sur debian/ubuntu. Par exemple, si nous souhaitons que OpenDevin ait accès au "node" binaire, nous utiliserions ce Dockerfile: + +```bash +# Commencez avec l'image ubuntu la plus récente +FROM ubuntu:latest + +# Effectuez les mises à jour nécessaires +RUN apt-get update && apt-get install + +# Installez nodejs +RUN apt-get install -y nodejs +``` + +Ensuite, construisez votre image docker avec le nom de votre choix. Par exemple "image_personnalisée". Pour cela, créez un répertoire et placez le fichier à l'intérieur avec le nom "Dockerfile", puis dans le répertoire exécutez cette commande: + +```bash +docker build -t image_personnalisée . +``` + +Cela produira une nouvelle image appelée ```image_personnalisée``` qui sera disponible dans Docker Engine. + +> Remarque: Dans la configuration décrite ici, OpenDevin va fonctionner en tant qu'utilisateur "opendevin" à l'intérieur du sandbox et donc tous les packages installés via le Dockerfile seront disponibles pour tous les utilisateurs sur le système, pas seulement root. 
+> +> L'installation avec apt-get ci-dessus installe nodejs pour tous les utilisateurs. + +## Spécifiez votre image personnalisée dans le fichier config.toml + +La configuration OpenDevin se fait via le fichier de niveau supérieur ```config.toml``` . +Créez un fichier ```config.toml``` dans le répertoire OpenDevin et entrez ces contenus: + +```toml +[core] +workspace_base="./workspace" +persist_sandbox=false +run_as_devin=true +sandbox_container_image="image_personnalisée" +``` + +> Assurez-vous que ```sandbox_container_image``` est défini sur le nom de votre image personnalisée précédente. + +## Exécution + +Exécutez OpenDevin en exécutant ```make run``` dans le répertoire racine. + +Naviguez vers ```localhost:3001``` et vérifiez si vos dépendances souhaitées sont disponibles. + +Dans le cas de l'exemple ci-dessus, la commande ```node -v``` dans la console produit ```v18.19.1``` + +Félicitations ! + +## Explication technique + +Le code pertinent est défini dans [ssh_box.py](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/ssh_box.py) et [image_agnostic_util.py](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py). + +En particulier, ssh_box.py vérifie l'objet config pour ```config.sandbox_container_image``` et ensuite tente de récupérer l'image à l'aide de [get_od_sandbox_image](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py#L72), qui est défini dans image_agnostic_util.py. + +Lorsqu'une image personnalisée est utilisée pour la première fois, elle ne sera pas trouvée et donc elle sera construite (à l'exécution ultérieure, l'image construite sera trouvée et renvoyée). 
+ +L'image personnalisée est construite avec [_build_sandbox_image()](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py#L29), qui crée un fichier docker en utilisant votre image personnalisée comme base et configure ensuite l'environnement pour OpenDevin, comme ceci: + +```python +dockerfile_content = ( + f'FROM {base_image}\n' + 'RUN apt update && apt install -y openssh-server wget sudo\n' + 'RUN mkdir -p -m0755 /var/run/sshd\n' + 'RUN mkdir -p /opendevin && mkdir -p /opendevin/logs && chmod 777 /opendevin/logs\n' + 'RUN wget "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh"\n' + 'RUN bash Miniforge3-$(uname)-$(uname -m).sh -b -p /opendevin/miniforge3\n' + 'RUN bash -c ". /opendevin/miniforge3/etc/profile.d/conda.sh && conda config --set changeps1 False && conda config --append channels conda-forge"\n' + 'RUN echo "export PATH=/opendevin/miniforge3/bin:$PATH" >> ~/.bashrc\n' + 'RUN echo "export PATH=/opendevin/miniforge3/bin:$PATH" >> /opendevin/bash.bashrc\n' + ).strip() +``` + +> Remarque: Le nom de l'image est modifié via [_get_new_image_name()](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py#L63) et c'est ce nom modifié qui sera recherché lors des exécutions ultérieures. + +## Dépannage / Erreurs + +### Erreur: ```useradd: UID 1000 est non unique``` +Si vous voyez cette erreur dans la sortie de la console, il s'agit du fait que OpenDevin essaie de créer le utilisateur opendevin dans le sandbox avec un ID d'utilisateur de 1000, cependant cet ID d'utilisateur est déjà utilisé dans l'image (pour une raison inconnue). 
Pour résoudre ce problème, changez la valeur du champ sandbox_user_id dans le fichier config.toml en une valeur différente: + +```toml +[core] +workspace_base="./workspace" +persist_sandbox=false +run_as_devin=true +sandbox_container_image="image_personnalisée" +sandbox_user_id="1001" +``` + +### Erreurs de port d'utilisation + +Si vous voyez un message d'erreur indiquant que le port est utilisé ou indisponible, essayez de supprimer tous les conteneurs docker en cours d'exécution (exécutez `docker ps` et `docker rm` des conteneurs concernés) puis ré-exécutez ```make run``` + +## Discuter + +Pour d'autres problèmes ou questions rejoignez le [Slack](https://join.slack.com/t/opendevin/shared_invite/zt-2jsrl32uf-fTeeFjNyNYxqSZt5NPY3fA) ou le [Discord](https://discord.gg/ESHStjSjD4) et demandez ! diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/feedback.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/feedback.md new file mode 100644 index 000000000000..78d683f9cf16 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/feedback.md @@ -0,0 +1,18 @@ +--- +sidebar_position: 6 +--- + +# ✅ Fournir des Commentaires + +Lorsque vous utilisez OpenDevin, vous rencontrerez sans aucun doute des cas où les choses fonctionnent bien et d'autres où elles ne fonctionnent pas. Nous vous encourageons à fournir des commentaires lorsque vous utilisez OpenDevin pour aider l'équipe de développement et, peut-être plus important encore, créer un corpus ouvert d'exemples de formation pour les agents de codage -- partagez OpenDevin ! + +## 📝 Comment Fournir des Commentaires + +Fournir des commentaires est simple ! Lorsque vous utilisez OpenDevin, vous pouvez appuyer sur le bouton de pouce vers le haut ou vers le bas à n'importe quel moment de votre interaction. 
Vous serez invité à fournir votre adresse email (par exemple, afin que nous puissions vous contacter si nous voulons poser des questions de suivi), et vous pouvez choisir si vous souhaitez fournir des commentaires publiquement ou en privé. + + + +## 📜 Licence de Données et Confidentialité + +* Les données **publiques** seront distribuées sous la licence MIT, comme OpenDevin lui-même, et pourront être utilisées par la communauté pour former et tester des modèles. Évidemment, les commentaires que vous pouvez rendre publics seront plus précieux pour la communauté dans son ensemble, donc lorsque vous ne traitez pas d'informations sensibles, nous vous encourageons à choisir cette option ! +* Les données **privées** ne seront partagées qu'avec l'équipe OpenDevin dans le but d'améliorer OpenDevin. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/intro.mdx b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/intro.mdx new file mode 100644 index 000000000000..69325dc4fa93 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/intro.mdx @@ -0,0 +1,111 @@ +--- +sidebar_position: 1 +--- + +# 💻 OpenDevin + +OpenDevin est un **ingénieur logiciel IA autonome** capable d'exécuter des tâches d'ingénierie complexes et de collaborer activement avec les utilisateurs sur des projets de développement logiciel. +Ce projet est entièrement open-source, vous pouvez donc l'utiliser et le modifier comme bon vous semble. + +:::tip +Explorez le code source d'OpenDevin sur [GitHub](https://github.com/OpenDevin/OpenDevin) ou rejoignez l'une de nos communautés ! + + + Contributors + + + Forks + + + Stargazers + + + Issues + +

+ + MIT License + +

+ + Join our Slack community + + + Join our Discord community + +::: + +## 🛠️ Pour commencer + +La manière la plus simple d'exécuter OpenDevin est à l'intérieur d'un conteneur Docker. Il fonctionne mieux avec la version la plus récente de Docker, `26.0.0`. +Vous devez utiliser Linux, Mac OS ou WSL sur Windows. + +Pour démarrer OpenDevin dans un conteneur docker, exécutez les commandes suivantes dans votre terminal : + +:::warning +Lorsque vous exécutez la commande suivante, les fichiers dans `./workspace` peuvent être modifiés ou supprimés. +::: + +```bash +WORKSPACE_BASE=$(pwd)/workspace +docker run -it \ + --pull=always \ + -e SANDBOX_USER_ID=$(id -u) \ + -e PERSIST_SANDBOX="true" \ + -e SSH_PASSWORD="make something up here" \ + -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ + -v $WORKSPACE_BASE:/opt/workspace_base \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -p 3000:3000 \ + --add-host host.docker.internal:host-gateway \ + --name opendevin-app-$(date +%Y%m%d%H%M%S) \ + ghcr.io/opendevin/opendevin:0.6 +``` + +Vous trouverez OpenDevin fonctionnant à l'adresse [http://localhost:3000](http://localhost:3000) avec accès à `./workspace`. Pour qu'OpenDevin fonctionne sur votre code, placez-le dans `./workspace`. + +OpenDevin n'aura accès qu'à ce dossier de workspace. Le reste de votre système ne sera pas affecté car il s'exécute dans un bac à sable sécurisé de docker. + +:::tip +Si vous souhaitez utiliser la version **(instable !)** la plus récente, vous pouvez utiliser `ghcr.io/opendevin/opendevin:main` comme image (dernière ligne). +::: + +Pour le workflow de développement, consultez [Development.md](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md). + +Avez-vous des problèmes ? Consultez notre [Guide de dépannage](https://opendevin.github.io/OpenDevin/modules/usage/troubleshooting). + +:::warning +OpenDevin est actuellement en cours de développement, mais vous pouvez déjà exécuter la version alpha pour voir le système de bout en bout en action. 
+::: + +[contributors-shield]: https://img.shields.io/github/contributors/opendevin/opendevin?style=for-the-badge +[contributors-url]: https://github.com/OpenDevin/OpenDevin/graphs/contributors +[forks-shield]: https://img.shields.io/github/forks/opendevin/opendevin?style=for-the-badge +[forks-url]: https://github.com/OpenDevin/OpenDevin/network/members +[stars-shield]: https://img.shields.io/github/stars/opendevin/opendevin?style=for-the-badge +[stars-url]: https://github.com/OpenDevin/OpenDevin/stargazers +[issues-shield]: https://img.shields.io/github/issues/opendevin/opendevin?style=for-the-badge +[issues-url]: https://github.com/OpenDevin/OpenDevin/issues +[license-shield]: https://img.shields.io/github/license/opendevin/opendevin?style=for-the-badge +[license-url]: https://github.com/OpenDevin/OpenDevin/blob/main/LICENSE diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/azureLLMs.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/azureLLMs.md new file mode 100644 index 000000000000..9e6066bd6cc9 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/azureLLMs.md @@ -0,0 +1,37 @@ +# Azure OpenAI LLM + +## Complétion + +OpenDevin utilise LiteLLM pour les appels de complétion. Vous pouvez trouver leur documentation sur Azure [ici](https://docs.litellm.ai/docs/providers/azure) + +### Configurations openai Azure + +Lors de l'exécution de l'image Docker OpenDevin, vous devrez définir les variables d'environnement suivantes en utilisant `-e` : + +``` +LLM_BASE_URL="" # e.g. "https://openai-gpt-4-test-v-1.openai.azure.com/" +LLM_API_KEY="" +LLM_MODEL="azure/" +LLM_API_VERSION = "" # e.g. "2024-02-15-preview" +``` + +:::note +Vous pouvez trouver le nom de votre déploiement ChatGPT sur la page des déploiements sur Azure. Par défaut ou initialement, il pourrait être le même que le nom du modèle de chat (par exemple 'GPT4-1106-preview'), mais il n'est pas obligé de l'être. 
Exécutez OpenDevin, et une fois chargé dans le navigateur, allez dans Paramètres et définissez le modèle comme suit : "azure/<your-actual-gpt-deployment-name>". Si ce n'est pas dans la liste, entrez votre propre texte et enregistrez-le. +::: + +## Embeddings + +OpenDevin utilise llama-index pour les embeddings. Vous pouvez trouver leur documentation sur Azure [ici](https://docs.llamaindex.ai/en/stable/api_reference/embeddings/azure_openai/) + +### Configurations openai Azure + +Le modèle utilisé pour les embeddings Azure OpenAI est "text-embedding-ada-002". +Vous avez besoin du nom de déploiement correct pour ce modèle dans votre compte Azure. + +Lors de l'exécution d'OpenDevin dans Docker, définissez les variables d'environnement suivantes en utilisant `-e` : + +``` +LLM_EMBEDDING_MODEL="azureopenai" +LLM_EMBEDDING_DEPLOYMENT_NAME = "" # e.g. "TextEmbedding..." +LLM_API_VERSION = "" # e.g. "2024-02-15-preview" +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/googleLLMs.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/googleLLMs.md new file mode 100644 index 000000000000..5f2154a69e4b --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/googleLLMs.md @@ -0,0 +1,28 @@ +# Google Gemini/Vertex LLM + +## Complétion + +OpenDevin utilise LiteLLM pour les appels de complétion. 
Les ressources suivantes sont pertinentes pour utiliser OpenDevin avec les LLMs de Google : + +- [Gemini - Google AI Studio](https://docs.litellm.ai/docs/providers/gemini) +- [VertexAI - Google Cloud Platform](https://docs.litellm.ai/docs/providers/vertex) + +### Configurations de Gemini - Google AI Studio + +Pour utiliser Gemini via Google AI Studio lors de l'exécution de l'image Docker d'OpenDevin, vous devez définir les variables d'environnement suivantes en utilisant `-e` : + +``` +GEMINI_API_KEY="" +LLM_MODEL="gemini/gemini-1.5-pro" +``` + +### Configurations de Vertex AI - Google Cloud Platform + +Pour utiliser Vertex AI via Google Cloud Platform lors de l'exécution de l'image Docker d'OpenDevin, vous devez définir les variables d'environnement suivantes en utilisant `-e` : + +``` +GOOGLE_APPLICATION_CREDENTIALS="" +VERTEXAI_PROJECT="" +VERTEXAI_LOCATION="" +LLM_MODEL="vertex_ai/" +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/llms.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/llms.md new file mode 100644 index 000000000000..56164751a7c4 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/llms.md @@ -0,0 +1,44 @@ +--- +sidebar_position: 2 +--- + +# 🤖 Backends LLM + +OpenDevin peut fonctionner avec n'importe quel backend LLM. +Pour une liste complète des fournisseurs et des modèles LLM disponibles, veuillez consulter la +[documentation litellm](https://docs.litellm.ai/docs/providers). + +:::warning +OpenDevin émettra de nombreuses requêtes au LLM que vous configurez. La plupart de ces LLM coûtent de l'argent -- assurez-vous de définir des limites de dépenses et de surveiller l'utilisation. +::: + +La variable d'environnement `LLM_MODEL` contrôle le modèle utilisé dans les interactions programmatiques. +Mais en utilisant l'interface utilisateur OpenDevin, vous devrez choisir votre modèle dans la fenêtre des paramètres (la roue dentée en bas à gauche). 
+ +Les variables d'environnement suivantes peuvent être nécessaires pour certains LLM : + +- `LLM_API_KEY` +- `LLM_BASE_URL` +- `LLM_EMBEDDING_MODEL` +- `LLM_EMBEDDING_DEPLOYMENT_NAME` +- `LLM_API_VERSION` + +Nous avons quelques guides pour exécuter OpenDevin avec des fournisseurs de modèles spécifiques : + +- [ollama](llms/localLLMs) +- [Azure](llms/azureLLMs) + +Si vous utilisez un autre fournisseur, nous vous encourageons à ouvrir une PR pour partager votre configuration ! + +## Remarque sur les modèles alternatifs + +Les meilleurs modèles sont GPT-4 et Claude 3. Les modèles locaux et open source actuels ne sont pas aussi puissants. +Lors de l'utilisation d'un modèle alternatif, vous pouvez constater des temps d'attente prolongés entre les messages, +des réponses de mauvaise qualité ou des erreurs sur des JSON mal formés. OpenDevin +ne peut être aussi puissant que les modèles qui le pilotent -- heureusement, les membres de notre équipe travaillent activement à la construction de meilleurs modèles open source ! + +## Réessais d'API et limites de taux + +Certains LLM ont des limites de taux et peuvent nécessiter des réessais. OpenDevin réessaiera automatiquement les demandes s'il reçoit une erreur 429 ou une erreur de connexion API. +Vous pouvez définir les variables d'environnement `LLM_NUM_RETRIES`, `LLM_RETRY_MIN_WAIT`, `LLM_RETRY_MAX_WAIT` pour contrôler le nombre de réessais et le temps entre les réessais. +Par défaut, `LLM_NUM_RETRIES` est 5 et `LLM_RETRY_MIN_WAIT`, `LLM_RETRY_MAX_WAIT` sont respectivement de 3 secondes et 60 secondes. diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/localLLMs.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/localLLMs.md new file mode 100644 index 000000000000..3dcbfc143bb5 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/llms/localLLMs.md @@ -0,0 +1,141 @@ +# LLM Local avec Ollama + +Assurez-vous que le serveur Ollama est en cours d'exécution. 
+Pour des instructions détaillées de démarrage, consultez [ici](https://github.com/ollama/ollama) + +Ce guide suppose que vous avez démarré ollama avec `ollama serve`. Si vous exécutez ollama différemment (par exemple, à l'intérieur de docker), les instructions pourraient devoir être modifiées. Veuillez noter que si vous utilisez WSL, la configuration par défaut de ollama bloque les requêtes des conteneurs docker. Voir [ici](#configuring-ollama-service-fr). + +## Télécharger des modèles + +Les noms des modèles Ollama peuvent être trouvés [ici](https://ollama.com/library). Pour un petit exemple, vous pouvez utiliser +le modèle `codellama:7b`. Des modèles plus grands offriront généralement de meilleures performances. + +```bash +ollama pull codellama:7b +``` + +vous pouvez vérifier quels modèles vous avez téléchargés de cette manière : + +```bash +~$ ollama list +NAME ID SIZE MODIFIED +codellama:7b 8fdf8f752f6e 3.8 GB 6 weeks ago +mistral:7b-instruct-v0.2-q4_K_M eb14864c7427 4.4 GB 2 weeks ago +starcoder2:latest f67ae0f64584 1.7 GB 19 hours ago +``` + +## Démarrer OpenDevin + +### Docker + +Utilisez les instructions [ici](../intro) pour démarrer OpenDevin en utilisant Docker. +Mais lors de l'exécution de `docker run`, vous devrez ajouter quelques arguments supplémentaires : + +```bash +--add-host host.docker.internal:host-gateway \ +-e LLM_API_KEY="ollama" \ +-e LLM_BASE_URL="http://host.docker.internal:11434" \ +``` + +Par exemple : + +```bash +# Le répertoire que vous souhaitez qu'OpenDevin modifie. DOIT être un chemin absolu ! 
+export WORKSPACE_BASE=$(pwd)/workspace + +docker run \ + -it \ + --pull=always \ + --add-host host.docker.internal:host-gateway \ + -e SANDBOX_USER_ID=$(id -u) \ + -e LLM_API_KEY="ollama" \ + -e LLM_BASE_URL="http://host.docker.internal:11434" \ + -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ + -v $WORKSPACE_BASE:/opt/workspace_base \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -p 3000:3000 \ + ghcr.io/opendevin/opendevin:main +``` + +Vous devriez maintenant pouvoir vous connecter à `http://localhost:3000/` + +### Compiler à partir des sources + +Utilisez les instructions dans [Development.md](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) pour compiler OpenDevin. +Assurez-vous que `config.toml` soit présent en exécutant `make setup-config` qui en créera un pour vous. Dans `config.toml`, saisissez les éléments suivants : + +``` +LLM_MODEL="ollama/codellama:7b" +LLM_API_KEY="ollama" +LLM_EMBEDDING_MODEL="local" +LLM_BASE_URL="http://localhost:11434" +WORKSPACE_BASE="./workspace" +WORKSPACE_DIR="$(pwd)/workspace" +``` + +Remplacez `LLM_MODEL` par celui de votre choix si nécessaire. + +Fini ! Vous pouvez maintenant démarrer Devin avec : `make run` sans Docker. Vous devriez maintenant pouvoir vous connecter à `http://localhost:3000/` + +## Sélection de votre modèle + +Dans l'interface OpenDevin, cliquez sur l'icône des paramètres en bas à gauche. +Ensuite, dans l'entrée `Model`, saisissez `ollama/codellama:7b`, ou le nom du modèle que vous avez téléchargé précédemment. +S'il n'apparaît pas dans un menu déroulant, ce n'est pas grave, tapez-le simplement. Cliquez sur Enregistrer lorsque vous avez terminé. + +Et maintenant, vous êtes prêt à démarrer ! + +## Configuration du service ollama (WSL){#configuring-ollama-service-fr} + +La configuration par défaut pour ollama sous WSL ne sert que localhost. Cela signifie que vous ne pouvez pas l'atteindre depuis un conteneur docker, par exemple, il ne fonctionnera pas avec OpenDevin. 
Testons d'abord que ollama est en cours d'exécution correctement. + +```bash +ollama list # obtenir la liste des modèles installés +curl http://localhost:11434/api/generate -d '{"model":"[NAME]","prompt":"hi"}' +#ex. curl http://localhost:11434/api/generate -d '{"model":"codellama:7b","prompt":"hi"}' +#ex. curl http://localhost:11434/api/generate -d '{"model":"codellama","prompt":"hi"}' #le tag est optionnel s'il n'y en a qu'un seul +``` + +Une fois cela fait, testez qu'il accepte les requêtes "externes", comme celles provenant d'un conteneur docker. + +```bash +docker ps # obtenir la liste des conteneurs docker en cours d'exécution, pour un test le plus précis choisissez le conteneur de sandbox open devin. +docker exec [CONTAINER ID] curl http://host.docker.internal:11434/api/generate -d '{"model":"[NAME]","prompt":"hi"}' +#ex. docker exec cd9cc82f7a11 curl http://host.docker.internal:11434/api/generate -d '{"model":"codellama","prompt":"hi"}' +``` + +## Correction + +Maintenant faisons en sorte que cela fonctionne. Modifiez /etc/systemd/system/ollama.service avec les privilèges sudo. (Le chemin peut varier selon la distribution Linux) + +```bash +sudo vi /etc/systemd/system/ollama.service +``` + +ou + +```bash +sudo nano /etc/systemd/system/ollama.service +``` + +Dans la section [Service], ajoutez ces lignes + +``` +Environment="OLLAMA_HOST=0.0.0.0:11434" +Environment="OLLAMA_ORIGINS=*" +``` + +Ensuite, sauvegardez, rechargez la configuration et redémarrez le service. + +```bash +sudo systemctl daemon-reload +sudo systemctl restart ollama +``` + +Enfin, testez que ollama est accessible depuis le conteneur + +```bash +ollama list # obtenir la liste des modèles installés +docker ps # obtenir la liste des conteneurs docker en cours d'exécution, pour un test le plus précis choisissez le conteneur de sandbox open devin. 
+docker exec [CONTAINER ID] curl http://host.docker.internal:11434/api/generate -d '{"model":"[NAME]","prompt":"hi"}' +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/troubleshooting/troubleshooting.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/troubleshooting/troubleshooting.md new file mode 100644 index 000000000000..942add81ce98 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/troubleshooting/troubleshooting.md @@ -0,0 +1,207 @@ +--- +sidebar_position: 5 +--- + +# 🚧 Dépannage + +Il existe certains messages d'erreur qui sont souvent signalés par les utilisateurs. + +Nous essaierons de rendre le processus d'installation plus facile et ces messages d'erreur +mieux à l'avenir. Mais pour l'instant, vous pouvez rechercher votre message d'erreur ci-dessous et voir s'il existe des solutions de contournement. + +Pour chacun de ces messages d'erreur, **il existe un problème existant**. Veuillez ne pas +ouvrir un nouveau problème - commentez simplement dessus. + +Si vous trouvez plus d'informations ou une solution de contournement pour l'un de ces problèmes, veuillez ouvrir un *PR* pour ajouter des détails à ce fichier. + +:::tip +Si vous utilisez Windows et que vous rencontrez des problèmes, consultez notre [guide pour les utilisateurs de Windows (WSL)](troubleshooting/windows). +::: + +## Impossible de se connecter à Docker + +[Problème GitHub](https://github.com/OpenDevin/OpenDevin/issues/1226) + +### Symptômes + +```bash +Erreur lors de la création du contrôleur. Veuillez vérifier que Docker est en cours d'exécution et visitez `https://opendevin.github.io/OpenDevin/modules/usage/troubleshooting` pour plus d'informations sur le débogage. 
+``` + +```bash +docker.errors.DockerException: Erreur lors de la récupération de la version de l'API du serveur : ('Connection aborted.', FileNotFoundError(2, 'Aucun fichier ou répertoire de ce type')) +``` + +### Détails + +OpenDevin utilise un conteneur Docker pour effectuer son travail en toute sécurité, sans risquer de briser votre machine. + +### Solutions de contournement + +* Exécutez `docker ps` pour vous assurer que docker est en cours d'exécution +* Assurez-vous que vous n'avez pas besoin de `sudo` pour exécuter docker [voir ici](https://www.baeldung.com/linux/docker-run-without-sudo) +* Si vous êtes sur un Mac, vérifiez les [exigences en matière d'autorisations](https://docs.docker.com/desktop/mac/permission-requirements/) et envisagez particulièrement d'activer l'option `Allow the default Docker socket to be used` sous `Settings > Advanced` dans Docker Desktop. +* De plus, mettez à jour Docker vers la dernière version sous `Check for Updates` + +## Impossible de se connecter à la boîte SSH + +[Problème GitHub](https://github.com/OpenDevin/OpenDevin/issues/1156) + +### Symptômes + +```python +self.shell = DockerSSHBox( +... +pexpect.pxssh.ExceptionPxssh: Impossible d'établir une connexion avec l'hôte +``` + +### Détails + +Par défaut, OpenDevin se connecte à un conteneur en cours d'exécution via SSH. Sur certaines machines, +en particulier Windows, cela semble échouer. 
+ +### Solutions de contournement + +* Redémarrez votre ordinateur (parfois cela fonctionne) +* Assurez-vous d'avoir les dernières versions de WSL et Docker +* Vérifiez que votre distribution dans WSL est également à jour +* Essayez [ce guide de réinstallation](https://github.com/OpenDevin/OpenDevin/issues/1156#issuecomment-2064549427) + +## Impossible de se connecter à LLM + +[Problème GitHub](https://github.com/OpenDevin/OpenDevin/issues/1208) + +### Symptômes + +```python + File "/app/.venv/lib/python3.12/site-packages/openai/_exceptions.py", line 81, in __init__ + super().__init__(message, response.request, body=body) + ^^^^^^^^^^^^^^^^ +AttributeError: 'NoneType' object has no attribute 'request' +``` + +### Détails + +[Problèmes GitHub](https://github.com/OpenDevin/OpenDevin/issues?q=is%3Aissue+is%3Aopen+404) + +Cela se produit généralement avec les configurations de LLM *locales*, lorsque OpenDevin ne parvient pas à se connecter au serveur LLM. +Consultez notre guide pour [LLMs locaux](llms/localLLMs) pour plus d'informations. 
+ +### Solutions de contournement + +* Vérifiez votre `base_url` dans votre config.toml (si elle existe) sous la section "llm" +* Vérifiez que ollama (ou tout autre LLM que vous utilisez) fonctionne correctement +* Assurez-vous d'utiliser `--add-host host.docker.internal:host-gateway` lorsque vous utilisez Docker + +## `404 Ressource non trouvée` + +### Symptômes + +```python +Traceback (most recent call last): + File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai.py", line 414, in completion + raise e + File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai.py", line 373, in completion + response = openai_client.chat.completions.create(**data, timeout=timeout) # type: ignore + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/_utils/_utils.py", line 277, in wrapper + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/resources/chat/completions.py", line 579, in create + return self._post( + ^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1232, in post + return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 921, in request + return self._request( + ^^^^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1012, in _request + raise self._make_status_error_from_response(err.response) from None +openai.NotFoundError: Code d'erreur : 404 - {'error': {'code': '404', 'message': 'Ressource non trouvée'}} +``` + +### Détails + +Cela se produit lorsque LiteLLM (notre bibliothèque pour se connecter à différents fournisseurs de LLM) ne parvient pas à trouver +le point de terminaison API avec lequel vous essayez de vous connecter. 
Cela arrive le plus souvent aux utilisateurs de Azure ou ollama. + +### Solutions de contournement + +* Vérifiez que vous avez correctement défini `LLM_BASE_URL` +* Vérifiez que le modèle est correctement défini, en fonction des [docs de LiteLLM](https://docs.litellm.ai/docs/providers) + * Si vous êtes en cours d'exécution dans l'interface utilisateur, assurez-vous de définir le `model` dans le modal des paramètres + * Si vous êtes en cours d'exécution sans interface (via main.py), assurez-vous de définir `LLM_MODEL` dans votre env/config +* Assurez-vous de suivre les instructions spéciales de votre fournisseur de LLM + * [ollama](/fr/modules/usage/llms/localLLMs) + * [Azure](/fr/modules/usage/llms/azureLLMs) + * [Google](/fr/modules/usage/llms/googleLLMs) +* Assurez-vous que votre clé API est correcte +* Voyez si vous pouvez vous connecter au LLM en utilisant `curl` +* Essayez de [vous connecter via LiteLLM directement](https://github.com/BerriAI/litellm) pour tester votre configuration + +## `make build` bloqué sur les installations de packages + +### Symptômes + +Installation de package bloquée sur `En attente...` sans aucun message d'erreur : + +```bash +Opérations de package : 286 installations, 0 mises à jour, 0 suppressions + + - Installation de certifi (2024.2.2) : En attente... + - Installation de h11 (0.14.0) : En attente... + - Installation de idna (3.7) : En attente... + - Installation de sniffio (1.3.1) : En attente... + - Installation de typing-extensions (4.11.0) : En attente... +``` + +### Détails + +Dans de rares cas, `make build` peut sembler bloqué sur les installations de packages +sans aucun message d'erreur. + +### Solutions de contournement + +* Le gestionnaire de packages Poetry peut manquer d'un paramètre de configuration concernant +l'emplacement où doivent être recherchées les informations d'identification (keyring). + +### Solution de contournement + +Tout d'abord, vérifiez avec `env` si une valeur pour `PYTHON_KEYRING_BACKEND` existe. 
+Sinon, exécutez la commande ci-dessous pour la définir à une valeur connue et réessayez la construction : + +```bash +export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring +``` + +## Les sessions ne sont pas restaurées + +### Symptômes + +OpenDevin demande généralement s'il faut reprendre ou commencer une nouvelle session lors de l'ouverture de l'interface utilisateur. +Mais cliquer sur "Reprendre" démarre toujours une toute nouvelle discussion. + +### Détails + +Avec une installation standard à ce jour, les données de session sont stockées en mémoire. +Actuellement, si le service OpenDevin est redémarré, les sessions précédentes deviennent +invalides (un nouveau secret est généré) et donc non récupérables. + +### Solutions de contournement + +* Modifiez la configuration pour rendre les sessions persistantes en éditant le fichier `config.toml` +(dans le dossier racine d'OpenDevin) en spécifiant un `file_store` et un +`file_store_path` absolu : + +```toml +file_store="local" +file_store_path="/absolute/path/to/opendevin/cache/directory" +``` + +* Ajoutez un secret jwt fixe dans votre .bashrc, comme ci-dessous, afin que les id de session précédents +restent acceptés. + +```bash +export JWT_SECRET=A_CONST_VALUE +``` diff --git a/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/troubleshooting/windows.md b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/troubleshooting/windows.md new file mode 100644 index 000000000000..40fc25888f65 --- /dev/null +++ b/docs/i18n/fr/docusaurus-plugin-content-docs/current/usage/troubleshooting/windows.md @@ -0,0 +1,76 @@ +# Notes pour les utilisateurs de Windows et WSL + +OpenDevin ne supporte Windows que via [WSL](https://learn.microsoft.com/en-us/windows/wsl/install). +Veuillez vous assurer de lancer toutes les commandes à l'intérieur de votre terminal WSL. + +## Dépannage + +### Erreur : 'docker' n'a pas pu être trouvé dans cette distribution WSL 2. 
+ +Si vous utilisez Docker Desktop, assurez-vous de le démarrer avant d'exécuter toute commande docker depuis l'intérieur de WSL. +Docker doit également avoir l'option d'intégration WSL activée. + +### Recommandation : Ne pas exécuter en tant qu'utilisateur root + +Pour des raisons de sécurité, il est fortement recommandé de ne pas exécuter OpenDevin en tant qu'utilisateur root, mais en tant qu'utilisateur avec un UID non nul. +De plus, les sandboxes persistants ne seront pas pris en charge lors de l'exécution en tant que root et un message approprié pourrait apparaître lors du démarrage d'OpenDevin. + +Références : + +* [Pourquoi il est mauvais de se connecter en tant que root](https://askubuntu.com/questions/16178/why-is-it-bad-to-log-in-as-root) +* [Définir l'utilisateur par défaut dans WSL](https://www.tenforums.com/tutorials/128152-set-default-user-windows-subsystem-linux-distro-windows-10-a.html#option2) +Astuce pour la 2e référence : pour les utilisateurs d'Ubuntu, la commande pourrait en fait être "ubuntupreview" au lieu de "ubuntu". + +### Échec de la création de l'utilisateur opendevin + +Si vous rencontrez l'erreur suivante lors de l'installation : + +```sh +Exception: Failed to create opendevin user in sandbox: 'useradd: UID 0 is not unique' +``` + +Vous pouvez la résoudre en exécutant : + +```sh +export SANDBOX_USER_ID=1000 +``` + +### Installation de Poetry + +* Si vous rencontrez des problèmes pour exécuter Poetry même après l'avoir installé pendant le processus de construction, il peut être nécessaire d'ajouter son chemin binaire à votre environnement : + +```sh +export PATH="$HOME/.local/bin:$PATH" +``` + +* Si `make build` s'arrête avec une erreur telle que : + +```sh +ModuleNotFoundError: no module named +``` + +Cela pourrait être un problème avec le cache de Poetry. 
+Essayez d'exécuter ces 2 commandes l'une après l'autre : + +```sh +rm -r ~/.cache/pypoetry +make build +``` + +### L'objet NoneType n'a pas d'attribut 'request' + +Si vous rencontrez des problèmes liés au réseau, tels que `NoneType object has no attribute 'request'` lors de l'exécution de `make run`, il peut être nécessaire de configurer vos paramètres réseau WSL2. Suivez ces étapes : + +* Ouvrez ou créez le fichier `.wslconfig` situé à `C:\Users\%username%\.wslconfig` sur votre machine hôte Windows. +* Ajoutez la configuration suivante au fichier `.wslconfig` : + +```sh +[wsl2] +networkingMode=mirrored +localhostForwarding=true +``` + +* Enregistrez le fichier `.wslconfig`. +* Redémarrez WSL2 complètement en quittant toute instance WSL2 en cours d'exécution et en exécutant la commande `wsl --shutdown` dans votre invite de commande ou terminal. +* Après avoir redémarré WSL, essayez d'exécuter `make run` à nouveau. +Le problème réseau devrait être résolu. diff --git a/docs/i18n/fr/docusaurus-theme-classic/navbar.json b/docs/i18n/fr/docusaurus-theme-classic/navbar.json new file mode 100644 index 000000000000..d7270111e84c --- /dev/null +++ b/docs/i18n/fr/docusaurus-theme-classic/navbar.json @@ -0,0 +1,26 @@ +{ + "title": { + "message": "OpenDevin", + "description": "The title in the navbar" + }, + "logo.alt": { + "message": "OpenDevin", + "description": "The alt text of navbar logo" + }, + "item.label.Docs": { + "message": "Docs", + "description": "Navbar item with label Docs" + }, + "item.label.Codebase": { + "message": "Codebase", + "description": "Navbar item with label Codebase" + }, + "item.label.FAQ": { + "message": "FAQ", + "description": "Navbar item with label FAQ" + }, + "item.label.GitHub": { + "message": "GitHub", + "description": "Navbar item with label GitHub" + } +} diff --git a/docs/i18n/zh-Hans/code.json b/docs/i18n/zh-Hans/code.json new file mode 100644 index 000000000000..0cc53c7d82a4 --- /dev/null +++ b/docs/i18n/zh-Hans/code.json @@ -0,0 +1,406 
@@ +{ + "footer.title": { + "message": "OpenDevin" + }, + "footer.docs": { + "message": "文档" + }, + "footer.community": { + "message": "社区" + }, + "footer.copyright": { + "message": "版权所有 © {year} OpenDevin" + }, + "faq.title": { + "message": "常见问题解答", + "description": "FAQ Title" + }, + "faq.description": { + "message": "常见问题解答" + }, + "faq.section.title.1": { + "message": "什么是OpenDevin?", + "description": "First Section Title" + }, + "faq.section.highlight": { + "message": "OpenDevin", + "description": "Highlight Text" + }, + "faq.section.description.1": { + "message": "是一个自主的软件工程师,能够端到端地解决软件工程和网页浏览任务。它能执行数据科学查询,如 \"查找上个月OpenDevin仓库中的拉取请求数量\",还能处理软件工程任务,例如 \"请为这个文件添加测试并验证所有测试都通过,如果没有修复该文件\"。", + "description": "Description for OpenDevin" + }, + "faq.section.description.2": { + "message": "同时,OpenDevin是一个代理开发者平台和社区,用于测试和评估新代理的环境。", + "description": "Further Description for OpenDevin" + }, + "faq.section.title.2": { + "message": "支持", + "description": "Support Section Title" + }, + "faq.section.support.answer": { + "message": "如果您发现了可能影响他人的问题,请在 {githubLink} 上提交一个 bug。如果遇到安装困难或有其他疑问,可以访问 {discordLink} 或 {slackLink} 进行提问。", + "description": "Support Answer" + }, + "faq.section.title.3": { + "message": "如何使用OpenDevin解决GitHub上的问题?", + "description": "GitHub Issue Section Title" + }, + "faq.section.github.steps.intro": { + "message": "要通过OpenDevin解决GitHub上的问题,您可以发送一个提示给OpenDevin,请它按照以下步骤操作:", + "description": "GitHub Steps Introduction" + }, + "faq.section.github.step1": { + "message": "阅读问题 https://github.com/OpenDevin/OpenDevin/issues/1611", + "description": "GitHub Step 1" + }, + "faq.section.github.step2": { + "message": "克隆仓库并创建新分支", + "description": "GitHub Step 2" + }, + "faq.section.github.step3": { + "message": "根据问题描述中的说明,修改文件以解决问题", + "description": "GitHub Step 3" + }, + "faq.section.github.step4": { + "message": "使用GITHUB_TOKEN环境变量将结果推送到GitHub", + "description": "GitHub Step 4" + }, + "faq.section.github.step5": { + "message": "告诉我需要前往的链接来提交拉取请求", + 
"description": "GitHub Step 5" + }, + "faq.section.github.steps.preRun": { + "message": "在运行OpenDevin之前,您可以:", + "description": "GitHub Steps Pre-Run" + }, + "faq.section.github.steps.tokenInfo": { + "message": "其中XXX是您创建的一个具有对OpenDevin仓库写权限的GitHub令牌。如果您的写入权限不足,请将其更改为:", + "description": "GitHub Steps Token Info" + }, + "faq.section.github.steps.usernameInfo": { + "message": "其中USERNAME是您的GitHub用户名。", + "description": "GitHub Steps Username Info" + }, + "faq.section.title.4": { + "message": "OpenDevin与Devin有何不同?", + "description": "Devin Section Title" + }, + "faq.section.devin.linkText": { + "message": "Devin", + "description": "Devin Link Text" + }, + "faq.section.devin.description": { + "message": "是由Cognition Inc.开发的商业产品,它最初为OpenDevin提供了灵感。它们都旨在擅长解决软件工程任务,但您可以下载、使用和修改OpenDevin,而Devin只能通过Cognition网站进行访问。此外,OpenDevin已超越最初的灵感,并成为一个面向代理开发者的社区驱动生态系统,在这里我们欢迎您加入并", + "description": "Devin Description" + }, + "faq.section.devin.contribute": { + "message": "贡献", + "description": "Contribute Link" + }, + "faq.section.title.5": { + "message": "OpenDevin与ChatGPT有何不同?", + "description": "ChatGPT Section Title" + }, + "faq.section.chatgpt.description": { + "message": "您可以通过网络访问ChatGPT,它不与本地文件交互,并且其执行代码的能力有限。因此,它可以编写代码,但测试或执行起来可能不太容易。", + "description": "ChatGPT Description" + }, + "homepage.description": { + "message": "使用AI生成代码的软件工程工具。", + "description": "The homepage description" + }, + "homepage.getStarted": { + "message": "开始使用" + }, + "welcome.message": { + "message": "欢迎来到OpenDevin,这是一个开源自主AI软件工程师,能够执行复杂的工程任务,并积极参与用户在软件开发项目中的协作。" + }, + "theme.ErrorPageContent.title": { + "message": "页面已崩溃。", + "description": "The title of the fallback page when the page crashed" + }, + "theme.BackToTopButton.buttonAriaLabel": { + "message": "返回顶部", + "description": "The ARIA label for the back to top button" + }, + "theme.blog.archive.title": { + "message": "历史博文", + "description": "The page & hero title of the blog archive page" + }, + "theme.blog.archive.description": { + "message": 
"历史博文", + "description": "The page & hero description of the blog archive page" + }, + "theme.blog.paginator.navAriaLabel": { + "message": "博文列表分页导航", + "description": "The ARIA label for the blog pagination" + }, + "theme.blog.paginator.newerEntries": { + "message": "较新的博文", + "description": "The label used to navigate to the newer blog posts page (previous page)" + }, + "theme.blog.paginator.olderEntries": { + "message": "较旧的博文", + "description": "The label used to navigate to the older blog posts page (next page)" + }, + "theme.blog.post.paginator.navAriaLabel": { + "message": "博文分页导航", + "description": "The ARIA label for the blog posts pagination" + }, + "theme.blog.post.paginator.newerPost": { + "message": "较新一篇", + "description": "The blog post button label to navigate to the newer/previous post" + }, + "theme.blog.post.paginator.olderPost": { + "message": "较旧一篇", + "description": "The blog post button label to navigate to the older/next post" + }, + "theme.blog.post.plurals": { + "message": "{count} 篇博文", + "description": "Pluralized label for \"{count} posts\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.blog.tagTitle": { + "message": "{nPosts} 含有标签「{tagName}」", + "description": "The title of the page for a blog tag" + }, + "theme.tags.tagsPageLink": { + "message": "查看所有标签", + "description": "The label of the link targeting the tag list page" + }, + "theme.colorToggle.ariaLabel": { + "message": "切换浅色/暗黑模式(当前为{mode})", + "description": "The ARIA label for the navbar color mode toggle" + }, + "theme.colorToggle.ariaLabel.mode.dark": { + "message": "暗黑模式", + "description": "The name for the dark color mode" + }, + "theme.colorToggle.ariaLabel.mode.light": { + "message": "浅色模式", + "description": "The name for the light color mode" + }, + "theme.docs.breadcrumbs.navAriaLabel": { + "message": "页面路径", + "description": "The ARIA label for the breadcrumbs" + }, + "theme.docs.DocCard.categoryDescription.plurals": { + "message": "{count} 个项目", + "description": "The default description for a category card in the generated index about how many items this category includes" + }, + "theme.docs.paginator.navAriaLabel": { + "message": "文件选项卡", + "description": "The ARIA label for the docs pagination" + }, + "theme.docs.paginator.previous": { + "message": "上一页", + "description": "The label used to navigate to the previous doc" + }, + "theme.docs.paginator.next": { + "message": "下一页", + "description": "The label used to navigate to the next doc" + }, + "theme.docs.tagDocListPageTitle.nDocsTagged": { + "message": "{count} 篇文档带有标签", + "description": "Pluralized label for \"{count} docs tagged\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.docs.tagDocListPageTitle": { + "message": "{nDocsTagged}「{tagName}」", + "description": "The title of the page for a docs tag" + }, + "theme.docs.versionBadge.label": { + "message": "版本:{versionLabel}" + }, + "theme.docs.versions.unreleasedVersionLabel": { + "message": "此为 {siteTitle} {versionLabel} 版尚未发行的文档。", + "description": "The label used to tell the user that he's browsing an unreleased doc version" + }, + "theme.docs.versions.unmaintainedVersionLabel": { + "message": "此为 {siteTitle} {versionLabel} 版的文档,现已不再积极维护。", + "description": "The label used to tell the user that he's browsing an unmaintained doc version" + }, + "theme.docs.versions.latestVersionSuggestionLabel": { + "message": "最新的文档请参阅 {latestVersionLink} ({versionLabel})。", + "description": "The label used to tell the user to check the latest version" + }, + "theme.docs.versions.latestVersionLinkLabel": { + "message": "最新版本", + "description": "The label used for the latest version suggestion link label" + }, + "theme.common.editThisPage": { + "message": "编辑此页", + "description": "The link label to edit the current page" + }, + "theme.common.headingLinkTitle": { + "message": "{heading}的直接链接", + "description": "Title for link to heading" + }, + "theme.lastUpdated.atDate": { + "message": "于 {date} ", + "description": "The words used to describe on which date a page has been last updated" + }, + "theme.lastUpdated.byUser": { + "message": "由 {user} ", + "description": "The words used to describe by who the page has been last updated" + }, + "theme.lastUpdated.lastUpdatedAtBy": { + "message": "最后{byUser}{atDate}更新", + "description": "The sentence used to display when a page has been last updated, and by who" + }, + "theme.navbar.mobileVersionsDropdown.label": { + "message": "选择版本", + "description": "The label for the navbar 
versions dropdown on mobile view" + }, + "theme.NotFound.title": { + "message": "找不到页面", + "description": "The title of the 404 page" + }, + "theme.tags.tagsListLabel": { + "message": "标签:", + "description": "The label alongside a tag list" + }, + "theme.admonition.caution": { + "message": "警告", + "description": "The default label used for the Caution admonition (:::caution)" + }, + "theme.admonition.danger": { + "message": "危险", + "description": "The default label used for the Danger admonition (:::danger)" + }, + "theme.admonition.info": { + "message": "信息", + "description": "The default label used for the Info admonition (:::info)" + }, + "theme.admonition.note": { + "message": "备注", + "description": "The default label used for the Note admonition (:::note)" + }, + "theme.admonition.tip": { + "message": "提示", + "description": "The default label used for the Tip admonition (:::tip)" + }, + "theme.admonition.warning": { + "message": "注意", + "description": "The default label used for the Warning admonition (:::warning)" + }, + "theme.AnnouncementBar.closeButtonAriaLabel": { + "message": "关闭", + "description": "The ARIA label for close button of announcement bar" + }, + "theme.blog.sidebar.navAriaLabel": { + "message": "最近博文导航", + "description": "The ARIA label for recent posts in the blog sidebar" + }, + "theme.CodeBlock.copied": { + "message": "复制成功", + "description": "The copied button label on code blocks" + }, + "theme.CodeBlock.copyButtonAriaLabel": { + "message": "将代码复制到剪贴板", + "description": "The ARIA label for copy code blocks button" + }, + "theme.CodeBlock.copy": { + "message": "复制", + "description": "The copy button label on code blocks" + }, + "theme.CodeBlock.wordWrapToggle": { + "message": "切换自动换行", + "description": "The title attribute for toggle word wrapping button of code block lines" + }, + "theme.DocSidebarItem.expandCategoryAriaLabel": { + "message": "展开侧边栏分类 '{label}'", + "description": "The ARIA label to expand the sidebar category" + }, + 
"theme.DocSidebarItem.collapseCategoryAriaLabel": { + "message": "折叠侧边栏分类 '{label}'", + "description": "The ARIA label to collapse the sidebar category" + }, + "theme.NavBar.navAriaLabel": { + "message": "主导航", + "description": "The ARIA label for the main navigation" + }, + "theme.navbar.mobileLanguageDropdown.label": { + "message": "选择语言", + "description": "The label for the mobile language switcher dropdown" + }, + "theme.NotFound.p1": { + "message": "我们找不到您要找的页面。", + "description": "The first paragraph of the 404 page" + }, + "theme.NotFound.p2": { + "message": "请联系原始链接来源网站的所有者,并告知他们链接已损坏。", + "description": "The 2nd paragraph of the 404 page" + }, + "theme.TOCCollapsible.toggleButtonLabel": { + "message": "本页总览", + "description": "The label used by the button on the collapsible TOC component" + }, + "theme.blog.post.readMore": { + "message": "阅读更多", + "description": "The label used in blog post item excerpts to link to full blog posts" + }, + "theme.blog.post.readMoreLabel": { + "message": "阅读 {title} 的全文", + "description": "The ARIA label for the link to full blog posts from excerpts" + }, + "theme.blog.post.readingTime.plurals": { + "message": "阅读需 {readingTime} 分钟", + "description": "Pluralized label for \"{readingTime} min read\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.docs.breadcrumbs.home": { + "message": "主页面", + "description": "The ARIA label for the home page in the breadcrumbs" + }, + "theme.docs.sidebar.collapseButtonTitle": { + "message": "收起侧边栏", + "description": "The title attribute for collapse button of doc sidebar" + }, + "theme.docs.sidebar.collapseButtonAriaLabel": { + "message": "收起侧边栏", + "description": "The title attribute for collapse button of doc sidebar" + }, + "theme.docs.sidebar.navAriaLabel": { + "message": "文档侧边栏", + "description": "The ARIA label for the sidebar navigation" + }, + "theme.docs.sidebar.closeSidebarButtonAriaLabel": { + "message": "关闭导航栏", + "description": "The ARIA label for close button of mobile sidebar" + }, + "theme.navbar.mobileSidebarSecondaryMenu.backButtonLabel": { + "message": "← 回到主菜单", + "description": "The label of the back button to return to main menu, inside the mobile navbar sidebar secondary menu (notably used to display the docs sidebar)" + }, + "theme.docs.sidebar.toggleSidebarButtonAriaLabel": { + "message": "切换导航栏", + "description": "The ARIA label for hamburger menu button of mobile navigation" + }, + "theme.docs.sidebar.expandButtonTitle": { + "message": "展开侧边栏", + "description": "The ARIA label and title attribute for expand button of doc sidebar" + }, + "theme.docs.sidebar.expandButtonAriaLabel": { + "message": "展开侧边栏", + "description": "The ARIA label and title attribute for expand button of doc sidebar" + }, + "theme.ErrorPageContent.tryAgain": { + "message": "重试", + "description": "The label of the button to try again rendering when the React error boundary captures an error" + }, + "theme.common.skipToMainContent": { + "message": "跳到主要内容", + "description": "The skip to content label used for accessibility, allowing to rapidly navigate to main content with keyboard tab/enter 
navigation" + }, + "theme.tags.tagsPageTitle": { + "message": "标签", + "description": "The title of the tag list page" + }, + "theme.unlistedContent.title": { + "message": "未列出页", + "description": "The unlisted content banner title" + }, + "theme.unlistedContent.message": { + "message": "此页面未列出。搜索引擎不会对其索引,只有拥有直接链接的用户才能访问。", + "description": "The unlisted content banner message" + } +} diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-blog/options.json b/docs/i18n/zh-Hans/docusaurus-plugin-content-blog/options.json new file mode 100644 index 000000000000..1c54b7738353 --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-blog/options.json @@ -0,0 +1,14 @@ +{ + "title": { + "message": "博客", + "description": "The title for the blog used in SEO" + }, + "description": { + "message": "博客", + "description": "The description for the blog used in SEO" + }, + "sidebar.title": { + "message": "最近文章", + "description": "The label for the left sidebar" + } +} diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current.json b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current.json new file mode 100644 index 000000000000..1a364a4c99bd --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current.json @@ -0,0 +1,18 @@ +{ + "version.label": { + "message": "Next", + "description": "The label for version current" + }, + "sidebar.docsSidebar.category.🤖 LLM 支持": { + "message": "🤖 LLM 支持", + "description": "The label for category 🤖 LLM 支持 in sidebar docsSidebar" + }, + "sidebar.docsSidebar.category.🚧 故障排除": { + "message": "🚧 故障排除", + "description": "The label for category 🚧 故障排除 in sidebar docsSidebar" + }, + "sidebar.apiSidebar.category.Backend": { + "message": "Backend", + "description": "The label for category Backend in sidebar apiSidebar" + } +} diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/python/python.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/python/python.md new file mode 100644 index 
000000000000..626c8e7507be --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/python/python.md @@ -0,0 +1,3 @@ +# Python 文档 + +部署后文档将会显示在这里。 diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/python/sidebar.json b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/python/sidebar.json new file mode 100644 index 000000000000..bea6aa5279d6 --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/python/sidebar.json @@ -0,0 +1,5 @@ +{ + "items": ["python/python"], + "label": "后端", + "type": "category" +} diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/about.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/about.md new file mode 100644 index 000000000000..137357724242 --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/about.md @@ -0,0 +1,53 @@ +--- +sidebar_position: 7 +--- + +# 📚 杂项 + +## ⭐️ 研究策略 + +通过 LLM 完全复制生产级应用程序是一个复杂的任务。我们的策略包含以下几个方面: + +1. **核心技术研究:** 专注于基础研究,以理解和改进代码生成和处理的技术方面。 +2. **专家能力:** 通过数据策划、训练方法等方式增强核心组件的有效性。 +3. **任务规划:** 开发错误检测、代码库管理和优化的能力。 +4. **评价:** 建立全面的评价指标,以更好地理解和改进我们的模型。 + +## 🚧 默认代理 + +- 我们当前的默认代理是 CodeActAgent,具备生成代码和处理文件的能力。我们正在开发其他代理实现,包括 [SWE Agent](https://swe-agent.com/)。您可以[在这里阅读我们当前的代理集合](./agents)。 + +## 🤝 如何贡献 + +OpenDevin 是一个社区驱动的项目,我们欢迎每个人的贡献。无论您是开发人员、研究人员,还是对用 AI 提升软件工程领域有兴趣,只要您愿意参与,我们都有很多方式可供选择: + +- **代码贡献:** 帮助我们开发核心功能、前端界面或沙箱解决方案。 +- **研究和评价:** 贡献您对 LLM 在软件工程领域理解的见解,参与评估模型,或提出改进建议。 +- **反馈和测试:** 使用 OpenDevin 工具集,报告错误,建议功能,或提供可用性方面的反馈。 + +详情请查阅[此文件](https://github.com/OpenDevin/OpenDevin/blob/main/CONTRIBUTING.md)。 + +## 🤖 加入我们的社区 + +我们现在有一个 Slack 工作区,用于合作建设 OpenDevin,还设有一个 Discord 服务器,用于讨论与该项目、LLM、代理等相关的任何事情。 + +- [Slack 工作区](https://join.slack.com/t/opendevin/shared_invite/zt-2jsrl32uf-fTeeFjNyNYxqSZt5NPY3fA) +- [Discord 服务器](https://discord.gg/ESHStjSjD4) + +如果您愿意贡献,请随时加入我们的社区。让我们一起简化软件工程! 
+ +🐚 **少写代码,用 OpenDevin 做更多的事情。** + +[![Star History Chart](https://api.star-history.com/svg?repos=OpenDevin/OpenDevin&type=Date)](https://star-history.com/#OpenDevin/OpenDevin&Date) + +## 🛠️ 技术选型 + +OpenDevin 使用了一系列强大的框架和库,提供了坚实的开发基础。以下是项目中使用的关键技术: + +![FastAPI](https://img.shields.io/badge/FastAPI-black?style=for-the-badge) ![uvicorn](https://img.shields.io/badge/uvicorn-black?style=for-the-badge) ![LiteLLM](https://img.shields.io/badge/LiteLLM-black?style=for-the-badge) ![Docker](https://img.shields.io/badge/Docker-black?style=for-the-badge) ![Ruff](https://img.shields.io/badge/Ruff-black?style=for-the-badge) ![MyPy](https://img.shields.io/badge/MyPy-black?style=for-the-badge) ![LlamaIndex](https://img.shields.io/badge/LlamaIndex-black?style=for-the-badge) ![React](https://img.shields.io/badge/React-black?style=for-the-badge) + +请注意,这些技术选型仍在进行中,随着项目的发展,可能会添加新的技术或移除现有的技术。我们努力采用最适合、最高效的工具,以增强 OpenDevin 的能力。 + +## 📜 许可证 + +根据 MIT 许可证分发。详见[我们的许可证](https://github.com/OpenDevin/OpenDevin/blob/main/LICENSE)了解更多信息。 diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/agents.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/agents.md new file mode 100644 index 000000000000..afa6368b8012 --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/agents.md @@ -0,0 +1,134 @@ +--- +sidebar_position: 3 +--- + +# 🧠 Agents and Capabilities + +## CodeAct Agent + +### 描述 + +该Agent实现了CodeAct的思想([论文](https://arxiv.org/abs/2402.01030),[推特](https://twitter.com/xingyaow_/status/1754556835703751087)),将LLM agents的**行为**合并到一个统一的**代码**动作空间中,以实现_简化_和_性能_(详情见论文)。 + +概念理念如下图所示。在每个回合,Agent可以: + +1. **对话**:用自然语言与人类交流,进行澄清、确认等。 +2. 
**CodeAct**:选择通过执行代码来完成任务 + +- 执行任何有效的Linux `bash`命令 +- 使用[交互式Python解释器](https://ipython.org/)执行任何有效的 `Python`代码。这是通过`bash`命令模拟的,详细信息请参见插件系统。 + +![image](https://github.com/OpenDevin/OpenDevin/assets/38853559/92b622e3-72ad-4a61-8f41-8c040b6d5fb3) + +### 插件系统 + +为了使CodeAct agent在仅能访问`bash`动作空间时更强大,CodeAct agent利用了OpenDevin的插件系统: + +- [Jupyter插件](https://github.com/OpenDevin/OpenDevin/tree/main/opendevin/runtime/plugins/jupyter):通过bash命令实现IPython执行 +- [SWE-agent工具插件](https://github.com/OpenDevin/OpenDevin/tree/main/opendevin/runtime/plugins/swe_agent_commands):为软件开发任务引入的强大bash命令行工具,由[swe-agent](https://github.com/princeton-nlp/swe-agent)提供。 + +### 演示 + +https://github.com/OpenDevin/OpenDevin/assets/38853559/f592a192-e86c-4f48-ad31-d69282d5f6ac + +_CodeActAgent使用`gpt-4-turbo-2024-04-09`执行数据科学任务(线性回归)的示例_ + +### 动作 + +`Action`, +`CmdRunAction`, +`IPythonRunCellAction`, +`AgentEchoAction`, +`AgentFinishAction`, +`AgentTalkAction` + +### 观测 + +`CmdOutputObservation`, +`IPythonRunCellObservation`, +`AgentMessageObservation`, +`UserMessageObservation` + +### 方法 + +| 方法 | 描述 | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `__init__` | 使用`llm`和一系列信息`list[Mapping[str, str]]`初始化Agent | +| `step` | 使用CodeAct Agent执行一步操作,包括收集前一步的信息并提示模型执行命令。 | + +### 进行中的工作 & 下一步 + +[] 支持Web浏览 +[] 完成CodeAct agent提交Github PR的工作流程 + +## Monologue Agent + +### 描述 + +Monologue Agent利用长短期记忆来完成任务。 +长期记忆存储为LongTermMemory对象,模型使用它来搜索过去的示例。 +短期记忆存储为Monologue对象,模型可以根据需要进行压缩。 + +### 动作 + +`Action`, +`NullAction`, +`CmdRunAction`, +`FileWriteAction`, +`FileReadAction`, +`BrowseURLAction`, +`GithubPushAction`, +`AgentThinkAction` + +### 观测 + +`Observation`, +`NullObservation`, +`CmdOutputObservation`, +`FileReadObservation`, +`BrowserOutputObservation` + +### 方法 + +| 方法 | 描述 | +| -------------- | 
------------------------------------------------------------------------------------------------------------------------------------------ | +| `__init__` | 使用长期记忆和内部独白初始化Agent | +| `_add_event` | 将事件附加到Agent的独白中,如独白过长自动与摘要一起压缩 | +| `_initialize` | 使用`INITIAL_THOUGHTS`列表为agent提供其能力的上下文以及如何导航`/workspace` | +| `step` | 通过添加最近的动作和观测修改当前状态,然后提示模型考虑其接下来的动作。 | + +## Planner Agent + +### 描述 + +Planner agent利用特殊的提示策略为解决问题创建长期计划。 +在每一步中,Agent会获得其先前的动作-观测对、当前任务以及基于上一次操作提供的提示。 + +### 动作 + +`NullAction`, +`CmdRunAction`, +`BrowseURLAction`, +`GithubPushAction`, +`FileReadAction`, +`FileWriteAction`, +`AgentThinkAction`, +`AgentFinishAction`, +`AgentSummarizeAction`, +`AddTaskAction`, +`ModifyTaskAction` + +### 观测 + +`Observation`, +`NullObservation`, +`CmdOutputObservation`, +`FileReadObservation`, +`BrowserOutputObservation` + +### 方法 + +| 方法 | 描述 | +| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `__init__` | 使用`llm`初始化Agent | +| `step` | 检查当前步骤是否完成,如果是则返回`AgentFinishAction`。否则,创建计划提示并发送给模型进行推理,将结果作为下一步动作。 | diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/architecture.mdx b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/architecture.mdx new file mode 100644 index 000000000000..6541151a169b --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/architecture.mdx @@ -0,0 +1,49 @@ +--- +sidebar_position: 4 +--- + +# 🏛️ 系统架构概览 + +这是系统架构的高层概览。系统分为两个主要组件:前端和后端。前端负责处理用户交互并显示结果。后端负责处理业务逻辑并执行代理。 + +![system_architecture.svg](/img/system_architecture.svg) + +此概览简化显示了主要组件及其交互。有关后端架构的更详细视图,请参见[后端架构](#backend-architecture-zh-Hans)部分。 + +# 后端架构 {#backend-architecture-zh-Hans} + +_**免责声明**:后端架构正在进行中,可能会有所变化。下图显示了基于图表页脚中的提交内容的当前后端架构。_ + +![backend_architecture.svg](/img/backend_architecture.svg) + +
+<details>
+  <summary>更新此图表</summary>
+  <div>
+ 后端架构图的生成部分是自动化的。 + 图表是使用 py2puml 工具从代码中的类型提示生成的。 + 然后人工审核、调整并导出为 PNG 和 SVG。 + + ## 前提条件 + + - 能运行 python 环境,其中 opendevin 可以执行(根据存储库根目录中的 README.md 文件中的说明) + - 安装了 [py2puml](https://github.com/lucsorel/py2puml) + +## 步骤 + +1. 通过从存储库根目录运行以下命令自动生成图表: + `py2puml opendevin opendevin > docs/architecture/backend_architecture.puml` + +2. 在 PlantUML 编辑器中打开生成的文件,例如使用 PlantUML 扩展的 Visual Studio Code 或 [PlantText](https://www.planttext.com/) + +3. 审查生成的 PUML 并对图表进行所有必要的调整(添加缺失部分、修正错误、改进定位)。 + _py2puml 根据代码中的类型提示创建图表,因此缺失或不正确的类型提示可能导致图表不完整或不正确。_ + +4. 审查新旧图表之间的差异,并手动检查更改是否正确。 + _确保不移除过去手动添加到图表中的和仍然相关的部分。_ + +5. 将用于生成图表的提交哈希添加到图表页脚。 + +6. 将图表导出为 PNG 和 SVG 文件,并替换 `docs/architecture` 目录中的现有图表。这可以通过(例如 [PlantText](https://www.planttext.com/))完成。 + +
+</details>
diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/custom_sandbox_guide.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/custom_sandbox_guide.md new file mode 100644 index 000000000000..2e639150822a --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/custom_sandbox_guide.md @@ -0,0 +1,107 @@ +# 💿 如何创建自定义 Docker 沙箱 + +默认的 OpenDevin 沙箱包含一个[最小化 ubuntu 配置](https://github.com/OpenDevin/OpenDevin/blob/main/containers/sandbox/Dockerfile)。您的应用场景可能需要在默认状态下安装额外的软件。本指南将教您如何通过使用自定义 Docker 映像来实现这一目标。 + +## 环境设置 + +确保您能够首先通过 [Development.md](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) 运行 OpenDevin。 + +## 创建您的 Docker 映像 + +接下来,您必须创建一个自定义的 Docker 映像,该映像是基于 Debian 或 Ubuntu 的。例如,如果我们希望 OpenDevin 能够访问 "node" 可执行文件,我们可以使用以下 `Dockerfile`: + +```bash +# 从最新版 ubuntu 开始 +FROM ubuntu:latest + +# 运行必要的更新 +RUN apt-get update && apt-get install + +# 安装 node +RUN apt-get install -y nodejs +``` + +然后构建您选择的映像,例如“custom_image”。为此可以在目录中创建文件夹并将 `Dockerfile` 放入其中,并在该目录内运行以下命令: + +```bash +docker build -t custom_image . +``` + +这将生成一个名为 ```custom_image``` 的新映像,并使其可用于 Docker 服务引擎。 + +> 注意:在本文档描述的配置中,OpenDevin 将在沙箱内部以“opendevin”用户身份运行。因此,通过 Dockerfile 安装的所有包应可供系统上的所有用户使用,而不仅仅是 root 用户。 + +> 使用 `apt-get` 上面安装的 node 是为所有用户安装的。 + +## 在 config.toml 文件中指定自定义映像 + +在 OpenDevin 的配置通过顶层的 `config.toml` 文件发生。在 OpenDevin 目录下创建一个 ```config.toml``` 文件,并输入以下内容: + +``` +[core] +workspace_base="./workspace" +persist_sandbox=false +run_as_devin=true +sandbox_container_image="custom_image" +``` + +> 确保 `sandbox_container_image` 设置为您前一步中自定义映像的名称。 + +## 运行 + +通过运行 ```make run``` 在顶层目录下运行 OpenDevin。 + +导航至 ```localhost:3001``` 并检查所需依赖是否可用。 + +在上述示例的情况下,终端中运行 `node -v` 会输出 `v18.19.1`。 + +恭喜您! 
+ +## 技术解释 + +相关代码定义在 [ssh_box.py](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/ssh_box.py) 和 [image_agnostic_util.py](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py) 中。 + +特别是 ssh_box.py 检查配置对象中的 ```config.sandbox_container_image```,然后尝试使用 [get_od_sandbox_image](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py#L72),在 image_agnostic_util.py 定义中进行检索。 + +初次使用自定义映像时,该映像将不会被找到,因此将被构建(在后续运行中已构建的映像将被查找并返回)。 + +自定义映像是通过 `_build_sandbox_image()` 构建的,在 [image_agnostic_util.py](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py#L29) 中,使用您的 custom_image 作为基础,并为 OpenDevin 配置环境。例如: + +```python +dockerfile_content = ( + f'FROM {base_image}\n' + 'RUN apt update && apt install -y openssh-server wget sudo\n' + 'RUN mkdir -p -m0755 /var/run/sshd\n' + 'RUN mkdir -p /opendevin && mkdir -p /opendevin/logs && chmod 777 /opendevin/logs\n' + 'RUN wget "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh"\n' + 'RUN bash Miniforge3-$(uname)-$(uname -m).sh -b -p /opendevin/miniforge3\n' + 'RUN bash -c ". 
/opendevin/miniforge3/etc/profile.d/conda.sh && conda config --set changeps1 False && conda config --append channels conda-forge"\n' + 'RUN echo "export PATH=/opendevin/miniforge3/bin:$PATH" >> ~/.bashrc\n' + 'RUN echo "export PATH=/opendevin/miniforge3/bin:$PATH" >> /opendevin/bash.bashrc\n' + ).strip() +``` + +> 注意:映像名称通过 [_get_new_image_name()](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py#L63) 修改,并且是后续运行中搜索的修改后的名称。 + +## 故障排除 / 错误 + +### 错误:```useradd: UID 1000 is not unique``` + +如果在控制台输出中看到此错误,说明 OpenDevin 尝试在沙箱中以 UID 1000 创建 opendevin 用户,但该 UID 已经被映像中的其他部分使用(不知何故)。要解决这个问题,请更改 config.toml 文件中的 sandbox_user_id 字段为不同的值: + +``` +[core] +workspace_base="./workspace" +persist_sandbox=false +run_as_devin=true +sandbox_container_image="custom_image" +sandbox_user_id="1001" +``` + +### 端口使用错误 + +如果您看到关于端口被占用或不可用的错误,请尝试删除所有正在运行的 Docker 容器(通过运行 `docker ps` 和 `docker rm` 相关容器),然后重新运行 ```make run```。 + +## 讨论 + +对于其他问题或疑问,请加入 [Slack](https://join.slack.com/t/opendevin/shared_invite/zt-2jsrl32uf-fTeeFjNyNYxqSZt5NPY3fA) 或 [Discord](https://discord.gg/ESHStjSjD4),并提问! diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/feedback.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/feedback.md new file mode 100644 index 000000000000..5dde206396dc --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/feedback.md @@ -0,0 +1,18 @@ +--- +sidebar_position: 6 +--- + +# ✅ 提供反馈 + +在使用 OpenDevin 时,你无疑会遇到一些情况,某些地方工作得很好,而另一些地方则可能不尽如人意。我们鼓励你在使用 OpenDevin 时提供反馈,这不仅有助于开发团队改善应用,更为重要的是,可以创建一个开放的编码代理训练样例语料库——Share-OpenDevin! + +## 📝 如何提供反馈 + +提供反馈很简单!在使用 OpenDevin 时,你可以在任意时刻按下点赞或点踩按钮。你将被要求提供你的电子邮件地址(例如,以便我们在需要进一步询问时联系你),你可以选择公开或私密地提供反馈。 + + + +## 📜 数据许可与隐私 + +* **公开** 数据将与 OpenDevin 本身一样以 MIT 许可协议发布,并可被社区用来训练和测试模型。显然,你能够公开的反馈对整个社区来说更有价值,因此当你不涉及敏感信息时,我们鼓励你选择这个选项! 
+* **私密** 数据将仅与 OpenDevin 团队分享,用于改进 OpenDevin。 diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/intro.mdx b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/intro.mdx new file mode 100644 index 000000000000..2d10a0903452 --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/intro.mdx @@ -0,0 +1,111 @@ +--- +sidebar_position: 1 +--- + +# 💻 OpenDevin + +OpenDevin 是一个**自主 AI 软件工程师**,能够执行复杂的工程任务,并在软件开发项目中积极与用户合作。 +这个项目是完全开源的,所以你可以随意使用和修改它。 + +:::tip +在 [GitHub](https://github.com/OpenDevin/OpenDevin) 上探索 OpenDevin 的代码库或加入我们的社区! + + + Contributors + + + Forks + + + Stargazers + + + Issues + +

+ + MIT License + +

+ + Join our Slack community + + + Join our Discord community + +::: + +## 🛠️ 入门指南 + +运行 OpenDevin 最简单的方法是在 Docker 容器中。它在 Docker 的最新版本 `26.0.0` 上运行效果最佳。 +你必须使用 Linux、Mac OS 或 Windows 上的 WSL。 + +要在 Docker 容器中启动 OpenDevin,请在终端中运行以下命令: + +:::warning +运行以下命令时,`./workspace` 中的文件可能会被修改或删除。 +::: + +```bash +WORKSPACE_BASE=$(pwd)/workspace +docker run -it \ + --pull=always \ + -e SANDBOX_USER_ID=$(id -u) \ + -e PERSIST_SANDBOX="true" \ + -e SSH_PASSWORD="make something up here" \ + -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ + -v $WORKSPACE_BASE:/opt/workspace_base \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -p 3000:3000 \ + --add-host host.docker.internal:host-gateway \ + --name opendevin-app-$(date +%Y%m%d%H%M%S) \ + ghcr.io/opendevin/opendevin:0.6 +``` + +你会发现 OpenDevin 在 [http://localhost:3000](http://localhost:3000) 运行,并可以访问 `./workspace`。要让 OpenDevin 操作你的代码,请将代码放在 `./workspace` 中。 + +OpenDevin 只会访问这个工作区文件夹。它在一个安全的 docker 沙盒中运行,不会影响你系统的其他部分。 + +:::tip +如果你想使用**(不稳定!)**最新版本,可以使用 `ghcr.io/opendevin/opendevin:main` 作为镜像(最后一行)。 +::: + +有关开发工作流程,请参阅 [Development.md](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md)。 + +遇到问题了吗?查看我们的 [故障排除指南](https://opendevin.github.io/OpenDevin/modules/usage/troubleshooting)。 + +:::warning +OpenDevin 目前正在开发中,但你已经可以运行 alpha 版本来查看端到端系统的运作情况。 +::: + +[contributors-shield]: https://img.shields.io/github/contributors/opendevin/opendevin?style=for-the-badge +[contributors-url]: https://github.com/OpenDevin/OpenDevin/graphs/contributors +[forks-shield]: https://img.shields.io/github/forks/opendevin/opendevin?style=for-the-badge +[forks-url]: https://github.com/OpenDevin/OpenDevin/network/members +[stars-shield]: https://img.shields.io/github/stars/opendevin/opendevin?style=for-the-badge +[stars-url]: https://github.com/OpenDevin/OpenDevin/stargazers +[issues-shield]: https://img.shields.io/github/issues/opendevin/opendevin?style=for-the-badge +[issues-url]: https://github.com/OpenDevin/OpenDevin/issues +[license-shield]: 
https://img.shields.io/github/license/opendevin/opendevin?style=for-the-badge +[license-url]: https://github.com/OpenDevin/OpenDevin/blob/main/LICENSE diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/azureLLMs.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/azureLLMs.md new file mode 100644 index 000000000000..40341c3fe97f --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/azureLLMs.md @@ -0,0 +1,37 @@ +# Azure OpenAI 大型语言模型 + +## 完成 + +OpenDevin 使用 LiteLLM 进行完成调用。你可以在 Azure 的文档中找到他们的文档 [这里](https://docs.litellm.ai/docs/providers/azure) + +### Azure openai 配置 + +在运行 OpenDevin Docker 镜像时,你需要使用 `-e` 设置以下环境变量: + +``` +LLM_BASE_URL="" # 示例: "https://openai-gpt-4-test-v-1.openai.azure.com/" +LLM_API_KEY="" +LLM_MODEL="azure/" +LLM_API_VERSION = "" # 示例: "2024-02-15-preview" +``` + +:::note +你可以在 Azure 的部署页面找到你的 ChatGPT 部署名称。它可能与默认或最初设置的聊天模型名称相同(例如 'GPT4-1106-preview'),但不一定相同。运行 OpenDevin,当你在浏览器中加载它时,进入设置并按照上述方式设置模型: "azure/<your-actual-gpt-deployment-name>"。如果列表中没有,请输入你自己的文本并保存。 +::: + +## 嵌入 + +OpenDevin 使用 llama-index 进行嵌入。你可以在 Azure 的文档中找到他们的文档 [这里](https://docs.llamaindex.ai/en/stable/api_reference/embeddings/azure_openai/) + +### Azure openai 配置 + +Azure OpenAI 嵌入使用的模型是 "text-embedding-ada-002"。 +你需要在你的 Azure 账户中为这个模型设置正确的部署名称。 + +在 Docker 中运行 OpenDevin 时,使用 `-e` 设置以下环境变量: + +``` +LLM_EMBEDDING_MODEL="azureopenai" +LLM_EMBEDDING_DEPLOYMENT_NAME = "" # 示例: "TextEmbedding..." 
+LLM_API_VERSION = "" # 示例: "2024-02-15-preview" +``` diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/googleLLMs.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/googleLLMs.md new file mode 100644 index 000000000000..8ccaea1ba002 --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/googleLLMs.md @@ -0,0 +1,28 @@ +# Google Gemini/Vertex LLM + +## Completion + +OpenDevin 使用 LiteLLM 进行补全调用。以下资源与使用 OpenDevin 和 Google 的 LLM 相关: + +- [Gemini - Google AI Studio](https://docs.litellm.ai/docs/providers/gemini) +- [VertexAI - Google Cloud Platform](https://docs.litellm.ai/docs/providers/vertex) + +### Gemini - Google AI Studio 配置 + +在运行 OpenDevin Docker 镜像时,通过 Google AI Studio 使用 Gemini,你需要使用 `-e` 设置以下环境变量: + +``` +GEMINI_API_KEY="" +LLM_MODEL="gemini/gemini-1.5-pro" +``` + +### Vertex AI - Google Cloud Platform 配置 + +在运行 OpenDevin Docker 镜像时,通过 Google Cloud Platform 使用 Vertex AI,你需要使用 `-e` 设置以下环境变量: + +``` +GOOGLE_APPLICATION_CREDENTIALS="" +VERTEXAI_PROJECT="" +VERTEXAI_LOCATION="" +LLM_MODEL="vertex_ai/" +``` diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/llms.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/llms.md new file mode 100644 index 000000000000..0247c1e17399 --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/llms.md @@ -0,0 +1,46 @@ +--- +sidebar_position: 2 +--- + +# 🤖 LLM 支持 + +OpenDevin 可以兼容任何 LLM 后端。 +关于所有可用 LM 提供商和模型的完整列表,请参阅 +[litellm 文档](https://docs.litellm.ai/docs/providers)。 + +:::warning +OpenDevin 将向你配置的 LLM 发出许多提示。大多数这些 LLM 都是收费的——请务必设定支出限额并监控使用情况。 +::: + +`LLM_MODEL` 环境变量控制在编程交互中使用的模型。 +但在使用 OpenDevin UI 时,你需要在设置窗口中选择你的模型(左下角的齿轮)。 + +某些 LLM 可能需要以下环境变量: + +- `LLM_API_KEY` +- `LLM_BASE_URL` +- `LLM_EMBEDDING_MODEL` +- `LLM_EMBEDDING_DEPLOYMENT_NAME` +- `LLM_API_VERSION` + +我们有一些指南,介绍了如何使用特定模型提供商运行 OpenDevin: + +- [ollama](llms/localLLMs) +- [Azure](llms/azureLLMs) + 
+如果你使用其他提供商,我们鼓励你打开一个 PR 来分享你的配置! + +## 关于替代模型的注意事项 + +最好的模型是 GPT-4 和 Claude 3。目前的本地和开源模型 +远没有那么强大。当使用替代模型时, +你可能会看到信息之间的长时间等待, +糟糕的响应,或关于 JSON格式错误的错误。OpenDevin +的强大程度依赖于其驱动的模型——幸运的是,我们团队的人员 +正在积极致力于构建更好的开源模型! + +## API 重试和速率限制 + +一些 LLM 有速率限制,可能需要重试操作。OpenDevin 会在收到 429 错误或 API 连接错误时自动重试请求。 +你可以设置 `LLM_NUM_RETRIES`,`LLM_RETRY_MIN_WAIT`,`LLM_RETRY_MAX_WAIT` 环境变量来控制重试次数和重试之间的时间。 +默认情况下,`LLM_NUM_RETRIES` 为 5,`LLM_RETRY_MIN_WAIT` 和 `LLM_RETRY_MAX_WAIT` 分别为 3 秒和 60 秒。 diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/localLLMs.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/localLLMs.md new file mode 100644 index 000000000000..4c065d7ffb73 --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/llms/localLLMs.md @@ -0,0 +1,140 @@ +# 使用 Ollama 的本地 LLM + +确保您的 Ollama 服务器已启动运行。有关详细的启动说明,请参阅[此处](https://github.com/ollama/ollama) + +本指南假定您已通过 `ollama serve` 启动 ollama。如果您以其他方式运行 ollama(例如在 docker 内),说明可能需要进行修改。请注意,如果您在运行 WSL,默认的 ollama 配置会阻止来自 docker 容器的请求。请参阅[此处](#configuring-ollama-service-zh-Hans)。 + +## 拉取模型 + +Ollama 模型名称可以在[这里](https://ollama.com/library)找到。一个小例子,您可以使用 +`codellama:7b` 模型。较大的模型通常表现更好。 + +```bash +ollama pull codellama:7b +``` + +您可以这样检查已下载的模型: + +```bash +~$ ollama list +NAME ID SIZE MODIFIED +codellama:7b 8fdf8f752f6e 3.8 GB 6 weeks ago +mistral:7b-instruct-v0.2-q4_K_M eb14864c7427 4.4 GB 2 weeks ago +starcoder2:latest f67ae0f64584 1.7 GB 19 hours ago +``` + +## 启动 OpenDevin + +### Docker + +使用[此处](../intro)的说明通过 Docker 启动 OpenDevin。 +但是在运行 `docker run` 时,您需要添加一些额外的参数: + +```bash +--add-host host.docker.internal:host-gateway \ +-e LLM_API_KEY="ollama" \ +-e LLM_BASE_URL="http://host.docker.internal:11434" \ +``` + +例如: + +```bash +# 您希望 OpenDevin 修改的目录。必须是绝对路径! 
+export WORKSPACE_BASE=$(pwd)/workspace + +docker run \ + -it \ + --pull=always \ + --add-host host.docker.internal:host-gateway \ + -e SANDBOX_USER_ID=$(id -u) \ + -e LLM_API_KEY="ollama" \ + -e LLM_BASE_URL="http://host.docker.internal:11434" \ + -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ + -v $WORKSPACE_BASE:/opt/workspace_base \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -p 3000:3000 \ + ghcr.io/opendevin/opendevin:main +``` + +现在您应该可以连接到 `http://localhost:3000/` + +### 从源代码构建 + +使用[Development.md](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md)中的说明构建 OpenDevin。 +通过运行 `make setup-config` 确保 `config.toml` 存在,这将为您创建一个。在 `config.toml` 中,输入以下内容: + +``` +LLM_MODEL="ollama/codellama:7b" +LLM_API_KEY="ollama" +LLM_EMBEDDING_MODEL="local" +LLM_BASE_URL="http://localhost:11434" +WORKSPACE_BASE="./workspace" +WORKSPACE_DIR="$(pwd)/workspace" +``` + +如有需要,可以替换您选择的 `LLM_MODEL`。 + +完成!现在您可以通过 `make run` 启动 Devin 而无需 Docker。现在您应该可以连接到 `http://localhost:3000/` + +## 选择您的模型 + +在 OpenDevin UI 中,点击左下角的设置齿轮。 +然后在 `Model` 输入中,输入 `ollama/codellama:7b`,或者您之前拉取的模型名称。 +如果它没有出现在下拉列表中,也没关系,只需输入即可。完成后点击保存。 + +现在您已经准备好了! 
+ +## 配置 ollama 服务(WSL){#configuring-ollama-service-zh-Hans} + +WSL 中 ollama 的默认配置仅为 localhost 提供服务。这意味着您无法从 docker 容器中访问它。比如,它不会与 OpenDevin 一起工作。首先让我们测试 ollama 是否正常运行。 + +```bash +ollama list # 获取已安装模型列表 +curl http://localhost:11434/api/generate -d '{"model":"[NAME]","prompt":"hi"}' +#例如,curl http://localhost:11434/api/generate -d '{"model":"codellama:7b","prompt":"hi"}' +#例如,curl http://localhost:11434/api/generate -d '{"model":"codellama","prompt":"hi"}' # 如果只有一个模型,标签是可选的 +``` + +完成后,测试它是否允许“外部”请求,比如那些来自 docker 容器内的请求。 + +```bash +docker ps # 获取正在运行的 docker 容器列表,最准确的测试选择 open devin sandbox 容器。 +docker exec [CONTAINER ID] curl http://host.docker.internal:11434/api/generate -d '{"model":"[NAME]","prompt":"hi"}' +#例如,docker exec cd9cc82f7a11 curl http://host.docker.internal:11434/api/generate -d '{"model":"codellama","prompt":"hi"}' +``` + +## 修复它 + +现在让我们使其工作。使用 sudo 权限编辑 /etc/systemd/system/ollama.service。 (路径可能因 linux 版本而异) + +```bash +sudo vi /etc/systemd/system/ollama.service +``` + +或者 + +```bash +sudo nano /etc/systemd/system/ollama.service +``` + +在 [Service] 括号内添加以下行 + +``` +Environment="OLLAMA_HOST=0.0.0.0:11434" +Environment="OLLAMA_ORIGINS=*" +``` + +然后保存,重新加载配置并重新启动服务。 + +```bash +sudo systemctl daemon-reload +sudo systemctl restart ollama +``` + +最后测试 ollama 是否可以从容器内访问 + +```bash +ollama list # 获取已安装模型列表 +docker ps # 获取正在运行的 docker 容器列表,最准确的测试选择 open devin sandbox 容器。 +docker exec [CONTAINER ID] curl http://host.docker.internal:11434/api/generate -d '{"model":"[NAME]","prompt":"hi"}' +``` diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/troubleshooting/troubleshooting.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/troubleshooting/troubleshooting.md new file mode 100644 index 000000000000..cba4488b3f1c --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/troubleshooting/troubleshooting.md @@ -0,0 +1,193 @@ +--- +sidebar_position: 5 +--- + +# 🚧 故障排除 + +以下是用户经常报告的一些错误信息。 + 
+我们将努力使安装过程更加简单,并改善这些错误信息。不过,现在您可以在下面找到您的错误信息,并查看是否有任何解决方法。 + +对于这些错误信息,**都已经有相关的报告**。请不要打开新的报告——只需在现有的报告中发表评论即可。 + +如果您发现更多信息或者一个解决方法,请提交一个 *PR* 来添加细节到这个文件中。 + +:::tip +如果您在 Windows 上运行并遇到问题,请查看我们的[Windows (WSL) 用户指南](troubleshooting/windows)。 +::: + +## 无法连接到 Docker + +[GitHub 问题](https://github.com/OpenDevin/OpenDevin/issues/1226) + +### 症状 + +```bash +创建控制器时出错。请检查 Docker 是否正在运行,并访问 `https://opendevin.github.io/OpenDevin/modules/usage/troubleshooting` 获取更多调试信息。 +``` + +```bash +docker.errors.DockerException: 获取服务器 API 版本时出错: ('连接中止。', FileNotFoundError(2, '没有这样的文件或目录')) +``` + +### 详情 + +OpenDevin 使用 Docker 容器来安全地完成工作,而不会破坏您的机器。 + +### 解决方法 + +* 运行 `docker ps` 以确保 Docker 正在运行 +* 确保您不需要使用 `sudo` 运行 Docker [请参见此处](https://www.baeldung.com/linux/docker-run-without-sudo) +* 如果您使用的是 Mac,请检查[权限要求](https://docs.docker.com/desktop/mac/permission-requirements/) ,特别是考虑在 Docker Desktop 的 `Settings > Advanced` 下启用 `Allow the default Docker socket to be used`。 +* 另外,升级您的 Docker 到最新版本,选择 `Check for Updates` + +## 无法连接到 DockerSSHBox + +[GitHub 问题](https://github.com/OpenDevin/OpenDevin/issues/1156) + +### 症状 + +```python +self.shell = DockerSSHBox( +... 
+pexpect.pxssh.ExceptionPxssh: Could not establish connection to host +``` + +### 详情 + +默认情况下,OpenDevin 使用 SSH 连接到一个运行中的容器。在某些机器上,尤其是 Windows,这似乎会失败。 + +### 解决方法 + +* 重新启动您的计算机(有时会有用) +* 确保拥有最新版本的 WSL 和 Docker +* 检查您的 WSL 分发版也已更新 +* 尝试[此重新安装指南](https://github.com/OpenDevin/OpenDevin/issues/1156#issuecomment-2064549427) + +## 无法连接到 LLM + +[GitHub 问题](https://github.com/OpenDevin/OpenDevin/issues/1208) + +### 症状 + +```python + File "/app/.venv/lib/python3.12/site-packages/openai/_exceptions.py", line 81, in __init__ + super().__init__(message, response.request, body=body) + ^^^^^^^^^^^^^^^^ +AttributeError: 'NoneType' object has no attribute 'request' +``` + +### 详情 + +[GitHub 问题](https://github.com/OpenDevin/OpenDevin/issues?q=is%3Aissue+is%3Aopen+404) + +这通常发生在本地 LLM 设置中,当 OpenDevin 无法连接到 LLM 服务器时。请参阅我们的 [本地 LLM 指南](llms/localLLMs) 以获取更多信息。 + +### 解决方法 + +* 检查您的 `config.toml` 文件中 "llm" 部分的 `base_url` 是否正确(如果存在) +* 检查 Ollama(或您使用的其他 LLM)是否正常运行 +* 确保在 Docker 中运行时使用 `--add-host host.docker.internal:host-gateway` + +## `404 Resource not found 资源未找到` + +### 症状 + +```python +Traceback (most recent call last): + File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai.py", line 414, in completion + raise e + File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai.py", line 373, in completion + response = openai_client.chat.completions.create(**data, timeout=timeout) # type: ignore + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/_utils/_utils.py", line 277, in wrapper + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/resources/chat/completions.py", line 579, in create + return self._post( + ^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1232, in post + return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) + 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 921, in request + return self._request( + ^^^^^^^^^^^^^^ + File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1012, in _request + raise self._make_status_error_from_response(err.response) from None +openai.NotFoundError: Error code: 404 - {'error': {'code': '404', 'message': 'Resource not found'}} +``` + +### 详情 + +当 LiteLLM(我们用于连接不同 LLM 提供商的库)找不到您要连接的 API 端点时,会发生这种情况。最常见的情况是 Azure 或 Ollama 用户。 + +### 解决方法 + +* 检查您是否正确设置了 `LLM_BASE_URL` +* 检查模型是否正确设置,基于 [LiteLLM 文档](https://docs.litellm.ai/docs/providers) + * 如果您在 UI 中运行,请确保在设置模式中设置 `model` + * 如果您通过 main.py 运行,请确保在环境变量/配置中设置 `LLM_MODEL` +* 确保遵循了您的 LLM 提供商的任何特殊说明 + * [Ollama](/zh-Hans/modules/usage/llms/localLLMs) + * [Azure](/zh-Hans/modules/usage/llms/azureLLMs) + * [Google](/zh-Hans/modules/usage/llms/googleLLMs) +* 确保您的 API 密钥正确无误 +* 尝试使用 `curl` 连接到 LLM +* 尝试[直接通过 LiteLLM 连接](https://github.com/BerriAI/litellm)来测试您的设置 + +## `make build` 在安装包时卡住 + +### 症状 + +安装包时卡在 `Pending...`,没有任何错误信息: + +```bash +Package operations: 286 installs, 0 updates, 0 removals + + - Installing certifi (2024.2.2): Pending... + - Installing h11 (0.14.0): Pending... + - Installing idna (3.7): Pending... + - Installing sniffio (1.3.1): Pending... + - Installing typing-extensions (4.11.0): Pending... 
+``` + +### 详情 + +在极少数情况下,`make build` 在安装包时似乎会卡住,没有任何错误信息。 + +### 可能原因 + +* 包管理器 Poetry 可能会错过用于查找凭据的配置设置(keyring)。 + +### 解决方法 + +首先使用 `env` 检查是否存在 `PYTHON_KEYRING_BACKEND` 的值。如果不存在,运行以下命令将其设置为已知值,然后重试构建: + +```bash +export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring +``` + +## 会话未恢复 + +### 症状 + +通常情况下,当打开 UI 时,OpenDevin 会询问是否要恢复或开始新会话。但点击“恢复”仍然会开始一个全新的聊天。 + +### 详情 + +按今天的标准安装,会话数据存储在内存中。目前,如果 OpenDevin 的服务重启,以前的会话将失效(生成一个新秘密),因此无法恢复。 + +### 解决方法 + +* 通过编辑 OpenDevin 根文件夹中的 `config.toml` 文件,更改配置以使会话持久化,指定一个 `file_store` 和一个绝对路径的 `file_store_path`: + +```toml +file_store="local" +file_store_path="/absolute/path/to/opendevin/cache/directory" +``` + +* 在您的 .bashrc 中添加一个固定的 JWT 秘密,如下所示,以便以前的会话 ID 可以被接受。 + +```bash +export JWT_SECRET=A_CONST_VALUE +``` diff --git a/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/troubleshooting/windows.md b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/troubleshooting/windows.md new file mode 100644 index 000000000000..8ec81dfca1ad --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-plugin-content-docs/current/usage/troubleshooting/windows.md @@ -0,0 +1,76 @@ +# Windows 和 WSL 用户须知 + +OpenDevin 仅支持通过 [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) 在 Windows 上运行。 +请确保在 WSL 终端内运行所有命令。 + +## 故障排除 + +### 错误:在此 WSL 2 发行版中找不到 'docker'。 + +如果您使用的是 Docker Desktop,请确保在 WSL 内部调用任何 docker 命令之前启动它。 +Docker 还需要启用 WSL 集成选项。 + +### 建议:不要以 root 用户身份运行 + +出于安全原因,非常建议不要以 root 用户身份运行 OpenDevin,而是使用 UID 非零的用户身份运行。 +此外,当以 root 身份运行时,不支持持久化沙箱,并且在启动 OpenDevin 时可能会出现相应消息。 + +参考资料: + +* [为什么以 root 登录是不好的](https://askubuntu.com/questions/16178/why-is-it-bad-to-log-in-as-root) +* [在 WSL 中设置默认用户](https://www.tenforums.com/tutorials/128152-set-default-user-windows-subsystem-linux-distro-windows-10-a.html#option2) +关于第二个参考资料的小提示:对于 Ubuntu 用户,该命令实际上可能是 "ubuntupreview" 而不是 "ubuntu"。 + +### 创建 opendevin 用户失败 + +如果您在设置过程中遇到以下错误: + +```sh +Exception: Failed to create opendevin user in sandbox: 'useradd: UID 0 is not
unique' +``` + +您可以通过运行以下命令解决: + +```sh +export SANDBOX_USER_ID=1000 +``` + +### Poetry 安装 + +* 如果在构建过程中安装 Poetry 后仍然面临运行 Poetry 的问题,您可能需要将其二进制路径添加到您的环境变量: + +```sh +export PATH="$HOME/.local/bin:$PATH" +``` + +* 如果 make build 停止并出现如下错误: + +```sh +ModuleNotFoundError: no module named +``` + +这可能是 Poetry 缓存的问题。 +尝试运行以下两个命令: + +```sh +rm -r ~/.cache/pypoetry +make build +``` + +### NoneType 对象没有属性 'request' + +如果您在执行 `make run` 时遇到与网络相关的问题,例如 `NoneType object has no attribute 'request'`,您可能需要配置您的 WSL2 网络设置。请按照以下步骤操作: + +* 打开或创建位于 Windows 主机机器上的 `C:\Users\%username%\.wslconfig` 文件。 +* 向 `.wslconfig` 文件添加以下配置: + +```sh +[wsl2] +networkingMode=mirrored +localhostForwarding=true +``` + +* 保存 `.wslconfig` 文件。 +* 通过退出所有正在运行的 WSL2 实例并在命令提示符或终端中执行 `wsl --shutdown` 命令,完全重启 WSL2。 +* 重新启动 WSL 后,尝试再次执行 `make run`。 + 网络问题应该已经解决。 diff --git a/docs/i18n/zh-Hans/docusaurus-theme-classic/navbar.json b/docs/i18n/zh-Hans/docusaurus-theme-classic/navbar.json new file mode 100644 index 000000000000..84e9b0ba6405 --- /dev/null +++ b/docs/i18n/zh-Hans/docusaurus-theme-classic/navbar.json @@ -0,0 +1,26 @@ +{ + "title": { + "message": "OpenDevin", + "description": "The title in the navbar" + }, + "logo.alt": { + "message": "OpenDevin", + "description": "The alt text of navbar logo" + }, + "item.label.Docs": { + "message": "文档", + "description": "Navbar item with label Docs" + }, + "item.label.Codebase": { + "message": "代码库", + "description": "Navbar item with label Codebase" + }, + "item.label.FAQ": { + "message": "常见问题", + "description": "Navbar item with label FAQ" + }, + "item.label.GitHub": { + "message": "GitHub", + "description": "Navbar item with label GitHub" + } +} diff --git a/docs/modules/usage/about.md b/docs/modules/usage/about.md index 82c1d5a6f82b..3c80f89a4767 100644 --- a/docs/modules/usage/about.md +++ b/docs/modules/usage/about.md @@ -1,5 +1,5 @@ --- -sidebar_position: 6 +sidebar_position: 8 --- # 📚 Misc @@ -15,7 +15,7 @@ Achieving full replication of production-grade 
applications with LLMs is a compl ## 🚧 Default Agent -- Our default Agent is currently the MonologueAgent, which has limited capabilities, but is fairly stable. We're working on other Agent implementations, including [SWE Agent](https://swe-agent.com/). You can [read about our current set of agents here](./agents). +- Our default Agent is currently the CodeActAgent, which is capable of generating code and handling files. We're working on other Agent implementations, including [SWE Agent](https://swe-agent.com/). You can [read about our current set of agents here](./agents). ## 🤝 How to Contribute @@ -29,9 +29,9 @@ For details, please check [this document](https://github.com/OpenDevin/OpenDevin ## 🤖 Join Our Community -Now we have both Slack workspace for the collaboration on building OpenDevin and Discord server for discussion about anything related, e.g., this project, LLM, agent, etc. +We have both Slack workspace for the collaboration on building OpenDevin and Discord server for discussion about anything related, e.g., this project, LLM, agent, etc. -- [Slack workspace](https://join.slack.com/t/opendevin/shared_invite/zt-2ggtwn3k5-PvAA2LUmqGHVZ~XzGq~ILw) +- [Slack workspace](https://join.slack.com/t/opendevin/shared_invite/zt-2jsrl32uf-fTeeFjNyNYxqSZt5NPY3fA) - [Discord server](https://discord.gg/ESHStjSjD4) If you would love to contribute, feel free to join our community. Let's simplify software engineering together! 
diff --git a/docs/modules/usage/agents.md b/docs/modules/usage/agents.md index 72a326353f07..f5723f10d76d 100644 --- a/docs/modules/usage/agents.md +++ b/docs/modules/usage/agents.md @@ -55,12 +55,6 @@ _Example of CodeActAgent with `gpt-4-turbo-2024-04-09` performing a data science | --------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | | `__init__` | Initializes an agent with `llm` and a list of messages `list[Mapping[str, str]]` | | `step` | Performs one step using the CodeAct Agent. This includes gathering info on previous steps and prompting the model to make a command to execute. | -| `search_memory` | Not yet implemented | - -### Work-in-progress & Next step - -[] Support web-browsing -[] Complete the workflow for CodeAct agent to submit Github PRs ## Monologue Agent @@ -77,7 +71,6 @@ Short term memory is stored as a Monologue object and the model can condense it `CmdRunAction`, `FileWriteAction`, `FileReadAction`, -`AgentRecallAction`, `BrowseURLAction`, `GithubPushAction`, `AgentThinkAction` @@ -88,7 +81,6 @@ Short term memory is stored as a Monologue object and the model can condense it `NullObservation`, `CmdOutputObservation`, `FileReadObservation`, -`AgentRecallObservation`, `BrowserOutputObservation` ### Methods @@ -99,7 +91,6 @@ Short term memory is stored as a Monologue object and the model can condense it | `_add_event` | Appends events to the monologue of the agent and condenses with summary automatically if the monologue is too long | | `_initialize` | Utilizes the `INITIAL_THOUGHTS` list to give the agent a context for its capabilities and how to navigate the `/workspace` | | `step` | Modifies the current state by adding the most recent actions and observations, then prompts the model to think about its next action to take. | -| `search_memory` | Uses `VectorIndexRetriever` to find related memories within the long term memory. 
| ## Planner Agent @@ -112,12 +103,10 @@ The agent is given its previous action-observation pairs, current task, and hint `NullAction`, `CmdRunAction`, -`CmdKillAction`, `BrowseURLAction`, `GithubPushAction`, `FileReadAction`, `FileWriteAction`, -`AgentRecallAction`, `AgentThinkAction`, `AgentFinishAction`, `AgentSummarizeAction`, @@ -130,7 +119,6 @@ The agent is given its previous action-observation pairs, current task, and hint `NullObservation`, `CmdOutputObservation`, `FileReadObservation`, -`AgentRecallObservation`, `BrowserOutputObservation` ### Methods @@ -139,4 +127,3 @@ The agent is given its previous action-observation pairs, current task, and hint | --------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `__init__` | Initializes an agent with `llm` | | `step` | Checks to see if current step is completed, returns `AgentFinishAction` if True. Otherwise, creates a plan prompt and sends to model for inference, adding the result as the next action. | -| `search_memory` | Not yet implemented | diff --git a/docs/modules/usage/architecture.mdx b/docs/modules/usage/architecture.mdx index 99a75502f40b..1a3173e9f7f1 100644 --- a/docs/modules/usage/architecture.mdx +++ b/docs/modules/usage/architecture.mdx @@ -1,16 +1,23 @@ --- -sidebar_position: 4 +sidebar_position: 7 --- # 🏛️ System Architecture Overview +
+ OpenDevin System Architecture Diagram Jul 4 2024 +

OpenDevin System Architecture Diagram (July 4, 2024)

+
+ This is a high-level overview of the system architecture. The system is divided into two main components: the frontend and the backend. The frontend is responsible for handling user interactions and displaying the results. The backend is responsible for handling the business logic and executing the agents. +# Frontend Architecture {#frontend-architecture-en} + ![system_architecture.svg](/img/system_architecture.svg) -This Overview is simplified to show the main components and their interactions. For a more detailed view of the backend architecture, see the [Backend Architecture](#backend-architecture) section. +This Overview is simplified to show the main components and their interactions. For a more detailed view of the backend architecture, see the Backend Architecture section below. -# Backend Architecture +# Backend Architecture {#backend-architecture-en} _**Disclaimer**: The backend architecture is a work in progress and is subject to change. The following diagram shows the current architecture of the backend based on the commit that is shown in the footer of the diagram._ diff --git a/docs/modules/usage/changelog.md b/docs/modules/usage/changelog.md new file mode 100644 index 000000000000..b9cdf20755d5 --- /dev/null +++ b/docs/modules/usage/changelog.md @@ -0,0 +1,75 @@ +--- +sidebar_position: 8 +--- + +# Upgrade Guide + +## 0.8.0 (2024-07-13) + +### Config breaking changes + +In this release we introduced a few breaking changes to backend configurations. +If you have only been using OpenDevin via frontend (web GUI), nothing needs +to be taken care of. + +Here's a list of breaking changes in configs. They only apply to users who +use OpenDevin CLI via `main.py`. For more detail, see [#2756](https://github.com/OpenDevin/OpenDevin/pull/2756). + +#### Removal of --model-name option from main.py + +Please note that `--model-name`, or `-m` option, no longer exists. You should set up the LLM +configs in `config.toml` or via environmental variables. 
+ +#### LLM config groups must be subgroups of 'llm' + +Prior to release 0.8, you can use arbitrary name for llm config in `config.toml`, e.g. + +```toml +[gpt-4o] +model="gpt-4o" +api_key="" +``` + +and then use `--llm-config` CLI argument to specify the desired LLM config group +by name. This no longer works. Instead, the config group must be under `llm` group, +e.g.: + +```toml +[llm.gpt-4o] +model="gpt-4o" +api_key="" +``` + +If you have a config group named `llm`, no need to change it, it will be used +as the default LLM config group. + +#### 'agent' group no longer contains 'name' field + +Prior to release 0.8, you may or may not have a config group named `agent` that +looks like this: + +```toml +[agent] +name="CodeActAgent" +memory_max_threads=2 +``` + +Note the `name` field is now removed. Instead, you should put `default_agent` field +under `core` group, e.g. + +```toml +[core] +# other configs +default_agent='CodeActAgent' + +[agent] +llm_config='llm' +memory_max_threads=2 + +[agent.CodeActAgent] +llm_config='gpt-4o' +``` + +Note that similar to `llm` subgroups, you can also define `agent` subgroups. +Moreover, an agent can be associated with a specific LLM config group. For more +detail, see the examples in `config.template.toml`. diff --git a/docs/modules/usage/custom_sandbox_guide.md b/docs/modules/usage/custom_sandbox_guide.md new file mode 100644 index 000000000000..125fb4b7547a --- /dev/null +++ b/docs/modules/usage/custom_sandbox_guide.md @@ -0,0 +1,129 @@ +--- +sidebar_position: 6 +--- + +# 💿 How to Create and Use a Custom Docker Sandbox + +The default OpenDevin sandbox comes with a [minimal ubuntu configuration](https://github.com/OpenDevin/OpenDevin/blob/main/containers/sandbox/Dockerfile). + +Your use case may need additional software installed by default. + +There are two ways you can do so: +1. Use an existing image from docker hub. For instance, if you want to have `nodejs` installed, you can do so by using the `node:20` image +2. 
Creating your own custom docker image and using it + +If you want to take the first approach, you can skip the `Create Your Docker Image` section. + +## Setup + +Make sure you are able to run OpenDevin using the [Development.md](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) first. + +## Create Your Docker Image +To create a custom docker image, it must be debian/ubuntu based. + +For example, if we want OpenDevin to have access to the `node` binary, we would use the following Dockerfile: + +```dockerfile +# Start with latest ubuntu image +FROM ubuntu:latest + +# Run needed updates +RUN apt-get update && apt-get install -y + +# Install node +RUN apt-get install -y nodejs +``` + +Next build your docker image with the name of your choice, for example `custom_image`. + +To do this you can create a directory and put your file inside it with the name `Dockerfile`, and inside the directory run the following command: + +```bash +docker build -t custom_image . +``` + +This will produce a new image called ```custom_image``` that will be available in Docker Engine. + +> Note that in the configuration described in this document, OpenDevin will run as user "opendevin" inside the sandbox and thus all packages installed via the docker file should be available to all users on the system, not just root. +> +> Installing with apt-get above installs node for all users. + +## Specify your sandbox image in config.toml file + +OpenDevin configuration occurs via the top-level `config.toml` file. + +Create a `config.toml` file in the OpenDevin directory and enter these contents: + +```toml +[core] +workspace_base="./workspace" +persist_sandbox=false +run_as_devin=true +sandbox_container_image="custom_image" +``` + +For `sandbox_container_image`, you can specify either: +1. The name of your custom image that you built in the previous step (e.g., `”custom_image”`) +2. 
A pre-existing image from Docker Hub (e.g., `”node:20”` if you want a sandbox with Node.js pre-installed) + +## Run +Run OpenDevin by running ```make run``` in the top level directory. + +Navigate to ```localhost:3001``` and check if your desired dependencies are available. + +In the case of the example above, running ```node -v``` in the terminal produces ```v20.15.0``` + +Congratulations! + +## Technical Explanation + +The relevant code is defined in [ssh_box.py](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/ssh_box.py) and [image_agnostic_util.py](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py). + +In particular, ssh_box.py checks the config object for ```config.sandbox_container_image``` and then attempts to retrieve the image using [get_od_sandbox_image](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py#L72) which is defined in image_agnostic_util.py. + +When first using a custom image, it will not be found and thus it will be built (on subsequent runs the built image will be found and returned). + +The custom image is built using [_build_sandbox_image()](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py#L29), which creates a docker file using your custom_image as a base and then configures the environment for OpenDevin, like this: + +```python +dockerfile_content = ( + f'FROM {base_image}\n' + 'RUN apt update && apt install -y openssh-server wget sudo\n' + 'RUN mkdir -p -m0755 /var/run/sshd\n' + 'RUN mkdir -p /opendevin && mkdir -p /opendevin/logs && chmod 777 /opendevin/logs\n' + 'RUN echo "" > /opendevin/bash.bashrc\n' + 'RUN if [ ! 
-d /opendevin/miniforge3 ]; then \\\n' + ' wget "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh" && \\\n' + ' bash Miniforge3-$(uname)-$(uname -m).sh -b -p /opendevin/miniforge3 && \\\n' + ' chmod -R g+w /opendevin/miniforge3 && \\\n' + ' bash -c ". /opendevin/miniforge3/etc/profile.d/conda.sh && conda config --set changeps1 False && conda config --append channels conda-forge"; \\\n' + ' fi\n' + 'RUN /opendevin/miniforge3/bin/pip install --upgrade pip\n' + 'RUN /opendevin/miniforge3/bin/pip install jupyterlab notebook jupyter_kernel_gateway flake8\n' + 'RUN /opendevin/miniforge3/bin/pip install python-docx PyPDF2 python-pptx pylatexenc openai\n' +).strip() +``` + +> Note: the name of the image is modified via [_get_new_image_name()](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/docker/image_agnostic_util.py#L63) and it is the modified name that is searched for on subsequent runs. + +## Troubleshooting / Errors + +### Error: ```useradd: UID 1000 is not unique``` +If you see this error in the console output it is because OpenDevin is trying to create the opendevin user in the sandbox with a UID of 1000, however this UID is already being used in the image (for some reason). To fix this change the sandbox_user_id field in the config.toml file to a different value: + +```toml +[core] +workspace_base="./workspace" +persist_sandbox=false +run_as_devin=true +sandbox_container_image="custom_image" +sandbox_user_id="1001" +``` + +### Port use errors + +If you see an error about a port being in use or unavailable, try deleting all running Docker Containers (run `docker ps` and `docker rm` relevant containers) and then re-running ```make run``` + +## Discuss + +For other issues or questions join the [Slack](https://join.slack.com/t/opendevin/shared_invite/zt-2jsrl32uf-fTeeFjNyNYxqSZt5NPY3fA) or [Discord](https://discord.gg/ESHStjSjD4) and ask! 
diff --git a/docs/modules/usage/feedback.md b/docs/modules/usage/feedback.md new file mode 100644 index 000000000000..b5b0152f7c5c --- /dev/null +++ b/docs/modules/usage/feedback.md @@ -0,0 +1,43 @@ +--- +sidebar_position: 5 +--- + +# ✅ Providing Feedback + +When using OpenDevin, you will undoubtably encounter cases where things work well, and others where they don't. We encourage you to provide feedback when you use OpenDevin to help give feedback to the development team, and perhaps more importantly, create an open corpus of coding agent training examples -- Share-OpenDevin! + +## 📝 How to Provide Feedback + +Providing feedback is easy! When you are using OpenDevin, you can press the thumbs-up or thumbs-down button at any point during your interaction with. You will be prompted to provide your email address (e.g. so we can contact you if we want to ask any follow-up questions), and you can choose whether you want to provide feedback publicly or privately. + + + +## 📜 Data Use and Privacy + +### Data sharing settings + +When you submit data, you can submit it either publicly or privately. + +* **Public** data will be distributed under the MIT License, like OpenDevin itself, and can be used by the community to train and test models. Obviously, feedback that you can make public will be more valuable for the community as a whole, so when you are not dealing with sensitive information, we would encourage you to choose this option! +* **Private** data will only be shared with the OpenDevin team for the purpose of improving OpenDevin. + +### Who collects and stores the data? + +The data is collected and stored by [All Hands AI](https://all-hands.dev), a company founded by OpenDevin maintainers to support and improve OpenDevin. + +### How will public data be released? + +The public data will be released when we hit fixed milestones, such as 1,000 public examples, 10,000 public examples, etc. +At this time, we will follow the following release process: + +1. 
All people who contributed public feedback will receive an email describing the data release and be given an opportunity to opt out. +2. The person or people in charge of the data release will perform quality control of the data, removing low-quality feedback, removing submitter email addresses, and attempting to remove any sensitive information such as API keys. +3. The data will be released publicly under the MIT license through commonly used sites such as GitHub or Hugging Face. + +### What if I want my data deleted? + +For data on the All Hands AI servers, we are happy to delete it upon request: + +**One Piece of Data:** If you want one piece of data deleted, we will shortly be adding a mechanism to delete pieces of data using the link and password that is displayed on the interface when you submit data. + +**All Data:** If you would like all pieces of your data deleted, or you do not have the ID and password that you received when submitting the data, please contact `contact@all-hands.dev` from the email address that you registered when you originally submitted the data. diff --git a/docs/modules/usage/intro.mdx b/docs/modules/usage/intro.mdx index f794da81f8db..69dc6ea3c66b 100644 --- a/docs/modules/usage/intro.mdx +++ b/docs/modules/usage/intro.mdx @@ -42,7 +42,7 @@ Explore the codebase of OpenDevin on [GitHub](https://github.com/OpenDevin/OpenD />

- + Join our Slack community" -LLM_API_VERSION = "" # e.g. "2024-02-15-preview" +LLM_EMBEDDING_DEPLOYMENT_NAME="" # e.g. "TextEmbedding..." +LLM_API_VERSION="" # e.g. "2024-02-15-preview" ``` diff --git a/docs/modules/usage/llms/llms.md b/docs/modules/usage/llms/llms.md index 46bac01b2989..08b1b1fff601 100644 --- a/docs/modules/usage/llms/llms.md +++ b/docs/modules/usage/llms/llms.md @@ -13,8 +13,7 @@ OpenDevin will issue many prompts to the LLM you configure. Most of these LLMs c ::: The `LLM_MODEL` environment variable controls which model is used in programmatic interactions. -But when using the OpenDevin UI, you'll need to choose your model in the settings window (the gear -wheel on the bottom left). +But when using the OpenDevin UI, you'll need to choose your model in the settings window. The following environment variables might be necessary for some LLMs: diff --git a/docs/modules/usage/llms/localLLMs.md b/docs/modules/usage/llms/localLLMs.md index 5da05d98397d..a2bbb138a9ee 100644 --- a/docs/modules/usage/llms/localLLMs.md +++ b/docs/modules/usage/llms/localLLMs.md @@ -3,7 +3,7 @@ Ensure that you have the Ollama server up and running. For detailed startup instructions, refer to [here](https://github.com/ollama/ollama) -This guide assumes you've started ollama with `ollama serve`. If you're running ollama differently (e.g. inside docker), the instructions might need to be modified. Please note that if you're running WSL the default ollama configuration blocks requests from docker containers. See [here](#configuring-the-ollama-service-wsl). +This guide assumes you've started ollama with `ollama serve`. If you're running ollama differently (e.g. inside docker), the instructions might need to be modified. Please note that if you're running WSL the default ollama configuration blocks requests from docker containers. See [here](#configuring-ollama-service-wsl-en). 
## Pull Models @@ -35,9 +35,12 @@ But when running `docker run`, you'll need to add a few more arguments: --add-host host.docker.internal:host-gateway \ -e LLM_API_KEY="ollama" \ -e LLM_BASE_URL="http://host.docker.internal:11434" \ +-e LLM_OLLAMA_BASE_URL="http://host.docker.internal:11434" \ ``` -For example: +LLM_OLLAMA_BASE_URL is optional. If you set it, it will be used to show the available installed models in the UI. + +Example: ```bash # The directory you want OpenDevin to modify. MUST be an absolute path! @@ -50,6 +53,7 @@ docker run \ -e SANDBOX_USER_ID=$(id -u) \ -e LLM_API_KEY="ollama" \ -e LLM_BASE_URL="http://host.docker.internal:11434" \ + -e LLM_OLLAMA_BASE_URL="http://host.docker.internal:11434" \ -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ -v $WORKSPACE_BASE:/opt/workspace_base \ -v /var/run/docker.sock:/var/run/docker.sock \ @@ -65,12 +69,16 @@ Use the instructions in [Development.md](https://github.com/OpenDevin/OpenDevin/ Make sure `config.toml` is there by running `make setup-config` which will create one for you. In `config.toml`, enter the followings: ``` -LLM_MODEL="ollama/codellama:7b" -LLM_API_KEY="ollama" -LLM_EMBEDDING_MODEL="local" -LLM_BASE_URL="http://localhost:11434" -WORKSPACE_BASE="./workspace" -WORKSPACE_DIR="$(pwd)/workspace" +[core] +workspace_base="./workspace" + +[llm] +model="ollama/codellama:7b" +api_key="ollama" +embedding_model="local" +base_url="http://localhost:11434" +ollama_base_url="http://localhost:11434" + ``` Replace `LLM_MODEL` of your choice if you need to. @@ -85,7 +93,7 @@ If it doesn’t show up in a dropdown, that’s fine, just type it in. Click Sav And now you're ready to go! -## Configuring the ollama service (WSL) +## Configuring the ollama service (WSL) {#configuring-ollama-service-wsl-en} The default configuration for ollama in WSL only serves localhost. This means you can't reach it from a docker container. eg. it wont work with OpenDevin. First let's test that ollama is running correctly. 
@@ -139,3 +147,62 @@ ollama list # get list of installed models docker ps # get list of running docker containers, for most accurate test choose the open devin sandbox container. docker exec [CONTAINER ID] curl http://host.docker.internal:11434/api/generate -d '{"model":"[NAME]","prompt":"hi"}' ``` + + +# Local LLM with LM Studio + +Steps to set up LM Studio: +1. Open LM Studio +2. Go to the Local Server tab. +3. Click the "Start Server" button. +4. Select the model you want to use from the dropdown. + + +Set the following configs: +```bash +LLM_MODEL="openai/lmstudio" +LLM_BASE_URL="http://localhost:1234/v1" +CUSTOM_LLM_PROVIDER="openai" +``` + +### Docker + +```bash +docker run \ + -it \ + --pull=always \ + -e SANDBOX_USER_ID=$(id -u) \ + -e LLM_MODEL="openai/lmstudio" + -e LLM_BASE_URL="http://host.docker.internal:1234/v1" \ + -e CUSTOM_LLM_PROVIDER="openai" + -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \ + -v $WORKSPACE_BASE:/opt/workspace_base \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -p 3000:3000 \ + ghcr.io/opendevin/opendevin:main +``` + +You should now be able to connect to `http://localhost:3000/` + +In the development environment, you can set the following configs in the `config.toml` file: + +``` +[core] +workspace_base="./workspace" + +[llm] +model="openai/lmstudio" +base_url="http://localhost:1234/v1" +custom_llm_provider="openai" +``` + +Done! Now you can start Devin by: `make run` without Docker. 
You now should be able to connect to `http://localhost:3000/` + +# Note: + +For WSL, run the following commands in cmd to set up the networking mode to mirrored: + +``` +python -c "print('[wsl2]\nnetworkingMode=mirrored',file=open(r'%UserProfile%\.wslconfig','w'))" +wsl --shutdown +``` diff --git a/docs/modules/usage/troubleshooting/troubleshooting.md b/docs/modules/usage/troubleshooting/troubleshooting.md index 3dfcbfd9eef2..61af30fbaeb8 100644 --- a/docs/modules/usage/troubleshooting/troubleshooting.md +++ b/docs/modules/usage/troubleshooting/troubleshooting.md @@ -1,100 +1,115 @@ --- -sidebar_position: 5 +sidebar_position: 4 --- # 🚧 Troubleshooting -There are some error messages that get reported over and over by users. -We'll try to make the install process easier, and to make these error messages -better in the future. But for now, you can look for your error message below, -and see if there are any workaround. +There are some error messages that frequently get reported by users. + +We'll try to make the install process easier and these error messages +better in the future. But for now, you can look for your error message below and see if there are any workarounds. For each of these error messages **there is an existing issue**. Please do not -open an new issue--just comment there. +open a new issue--just comment there. If you find more information or a workaround for one of these issues, please -open a PR to add details to this file. +open a *PR* to add details to this file. :::tip -If you're running on Windows and having trouble, check out our [guide for Windows users](troubleshooting/windows) +If you're running on Windows and having trouble, check out our [guide for Windows (WSL) users](troubleshooting/windows). 
::: -## Unable to connect to docker +## Common Issues + +* [Unable to connect to Docker](#unable-to-connect-to-docker) +* [Unable to connect to SSH box](#unable-to-connect-to-ssh-box) +* [404 Resource not found](#404-resource-not-found) +* [`make build` getting stuck on package installations](#make-build-getting-stuck-on-package-installations) +* [Sessions are not restored](#sessions-are-not-restored) + +### Unable to connect to Docker [GitHub Issue](https://github.com/OpenDevin/OpenDevin/issues/1226) -### Symptoms +**Symptoms** -``` +```bash Error creating controller. Please check Docker is running and visit `https://opendevin.github.io/OpenDevin/modules/usage/troubleshooting` for more debugging information. ``` -``` +```bash docker.errors.DockerException: Error while fetching server API version: ('Connection aborted.', FileNotFoundError(2, 'No such file or directory')) ``` -### Details +**Details** -OpenDevin uses a docker container to do its work safely, without potentially breaking your machine. +OpenDevin uses a Docker container to do its work safely, without potentially breaking your machine. -### Workarounds +**Workarounds** * Run `docker ps` to ensure that docker is running * Make sure you don't need `sudo` to run docker [see here](https://www.baeldung.com/linux/docker-run-without-sudo) -* If you are on a mac, check the [permissions requirements](https://docs.docker.com/desktop/mac/permission-requirements/) and in particular consider enabling the "Allow the default Docker socket to be used" under "Settings > Advanced" in Docker Desktop. -* If you are on a mac, Upgrade your Docker to the latest version under "Check for Updates" +* If you are on a Mac, check the [permissions requirements](https://docs.docker.com/desktop/mac/permission-requirements/) and in particular consider enabling the `Allow the default Docker socket to be used` under `Settings > Advanced` in Docker Desktop. 
+* In addition, upgrade your Docker to the latest version under `Check for Updates` + +--- +### Unable to connect to SSH box -## Unable to connect to SSH box [GitHub Issue](https://github.com/OpenDevin/OpenDevin/issues/1156) -### Symptoms +**Symptoms** -``` +```python self.shell = DockerSSHBox( ... pexpect.pxssh.ExceptionPxssh: Could not establish connection to host ``` -### Details +**Details** By default, OpenDevin connects to a running container using SSH. On some machines, especially Windows, this seems to fail. -### Workarounds +**Workarounds** -- Restart your computer (sometimes works?) -- Be sure to have the latest versions of WSL and Docker -- Try [this reinstallation guide](https://github.com/OpenDevin/OpenDevin/issues/1156#issuecomment-2064549427) -- Set `-e SANDBOX_TYPE=exec` to switch to the ExecBox docker container +* Restart your computer (sometimes it does work) +* Be sure to have the latest versions of WSL and Docker +* Check that your distribution in WSL is up to date as well +* Try [this reinstallation guide](https://github.com/OpenDevin/OpenDevin/issues/1156#issuecomment-2064549427) + +--- +### Unable to connect to LLM -## Unable to connect to LLM [GitHub Issue](https://github.com/OpenDevin/OpenDevin/issues/1208) -### Symptoms +**Symptoms** -``` +```python File "/app/.venv/lib/python3.12/site-packages/openai/_exceptions.py", line 81, in __init__ super().__init__(message, response.request, body=body) ^^^^^^^^^^^^^^^^ AttributeError: 'NoneType' object has no attribute 'request' ``` -### Details +**Details** [GitHub Issues](https://github.com/OpenDevin/OpenDevin/issues?q=is%3Aissue+is%3Aopen+404) -This usually happens with local LLM setups, when OpenDevin can't connect to the LLM server. +This usually happens with *local* LLM setups, when OpenDevin can't connect to the LLM server. See our guide for [local LLMs](llms/localLLMs) for more information. 
-### Workarounds +**Workarounds** -- Check your `LLM_BASE_URL` -- Check that ollama is running OK -- Make sure you're using `--add-host host.docker.internal:host-gateway` when running in docker +* Check your `base_url` in your config.toml (if it exists) under the "llm" section +* Check that ollama (or whatever LLM you're using) is running OK +* Make sure you're using `--add-host host.docker.internal:host-gateway` when running in Docker -## 404 Resource not found -### Symptoms -``` +--- +### `404 Resource not found` + +**Symptoms** + +```python Traceback (most recent call last): File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai.py", line 414, in completion raise e @@ -118,19 +133,86 @@ Traceback (most recent call last): openai.NotFoundError: Error code: 404 - {'error': {'code': '404', 'message': 'Resource not found'}} ``` -### Details +**Details** + This happens when LiteLLM (our library for connecting to different LLM providers) can't find -the API you're trying to connect to. Most often this happens for Azure or ollama users. - -### Workarounds -- Check that you've set `LLM_BASE_URL` properly -- Check that model is set properly, based on the [LiteLLM docs](https://docs.litellm.ai/docs/providers) - - If you're running inside the UI, be sure to set the `model` in the settings modal - - If you're running headless (via main.py) be sure to set `LLM_MODEL` in your env/config -- Make sure you've followed any special instructions for your LLM provider - - [ollama](/OpenDevin/modules/usage/llms/localLLMs) - - [Azure](/OpenDevin/modules/usage/llms/azureLLMs) - - [Google](/OpenDevin/modules/usage/llms/googleLLMs) -- Make sure your API key is correct -- See if you can connect to the LLM using `curl` -- Try [connecting via LiteLLM directly](https://github.com/BerriAI/litellm) to test your setup +the API endpoint you're trying to connect to. Most often this happens for Azure or ollama users. 
+ +**Workarounds** + +* Check that you've set `LLM_BASE_URL` properly +* Check that model is set properly, based on the [LiteLLM docs](https://docs.litellm.ai/docs/providers) + * If you're running inside the UI, be sure to set the `model` in the settings modal + * If you're running headless (via main.py) be sure to set `LLM_MODEL` in your env/config +* Make sure you've followed any special instructions for your LLM provider + * [ollama](/modules/usage/llms/localLLMs) + * [Azure](/modules/usage/llms/azureLLMs) + * [Google](/modules/usage/llms/googleLLMs) +* Make sure your API key is correct +* See if you can connect to the LLM using `curl` +* Try [connecting via LiteLLM directly](https://github.com/BerriAI/litellm) to test your setup + +--- +### `make build` getting stuck on package installations + +**Symptoms** + +Package installation stuck on `Pending...` without any error message: + +```bash +Package operations: 286 installs, 0 updates, 0 removals + + - Installing certifi (2024.2.2): Pending... + - Installing h11 (0.14.0): Pending... + - Installing idna (3.7): Pending... + - Installing sniffio (1.3.1): Pending... + - Installing typing-extensions (4.11.0): Pending... +``` + +**Details** + +In rare cases, `make build` can seemingly get stuck on package installations +without any error message. + +**Workarounds** + +The package installer Poetry may miss a configuration setting for where credentials are to be looked up (keyring). + +First check with `env` if a value for `PYTHON_KEYRING_BACKEND` exists. +If not, run the below command to set it to a known value and retry the build: + +```bash +export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring +``` + +--- +### Sessions are not restored + +**Symptoms** + +OpenDevin usually asks whether to resume or start a new session when opening the UI. +But clicking "Resume" still starts a fresh new chat. + +**Details** + +With a standard installation as of today session data is stored in memory. 
+Currently, if OpenDevin's service is restarted, previous sessions become +invalid (a new secret is generated) and thus not recoverable. + +**Workarounds** + +* Change configuration to make sessions persistent by editing the `config.toml` +file (in OpenDevin's root folder) by specifying a `file_store` and an +absolute `file_store_path`: + +```toml +file_store="local" +file_store_path="/absolute/path/to/opendevin/cache/directory" +``` + +* Add a fixed JWT secret in your .bashrc, like below, so that previous session IDs +remain accepted. + +```bash +export JWT_SECRET=A_CONST_VALUE +``` diff --git a/docs/modules/usage/troubleshooting/windows.md b/docs/modules/usage/troubleshooting/windows.md index 0bb904b00787..213903a013a2 100644 --- a/docs/modules/usage/troubleshooting/windows.md +++ b/docs/modules/usage/troubleshooting/windows.md @@ -5,32 +5,76 @@ Please be sure to run all commands inside your WSL terminal. ## Troubleshooting +### Recommendation: Do not run as root user + +For security reasons, it is highly recommended to not run OpenDevin as the root user, but a user with a non-zero UID. +In addition, persistent sandboxes won't be supported when running as root and during start of OpenDevin an appropriate message may appear. + +References: + +* [Why it is bad to log in as root](https://askubuntu.com/questions/16178/why-is-it-bad-to-log-in-as-root) +* [Set default user in WSL](https://www.tenforums.com/tutorials/128152-set-default-user-windows-subsystem-linux-distro-windows-10-a.html#option2) +Hint about the 2nd reference: for Ubuntu users, the command could actually be "ubuntupreview" instead of "ubuntu". + +--- +### Error: 'docker' could not be found in this WSL 2 distro. + +If you are using Docker Desktop, make sure to start it before calling any docker command from inside WSL. +Docker also needs to have the WSL integration option activated. 
+ +--- ### Failed to create opendevin user -If you encounter the following error during setup: `Exception: Failed to create opendevin user in sandbox: b'useradd: UID 0 is not unique\n'`. +If you encounter the following error during setup: + +```sh +Exception: Failed to create opendevin user in sandbox: 'useradd: UID 0 is not unique' + ``` + You can resolve it by running: -` export SANDBOX_USER_ID=1000 - ` +```sh +export SANDBOX_USER_ID=1000 +``` + +--- ### Poetry Installation -If you face issues running Poetry even after installing it during the build process, you may need to add its binary path to your environment: -` export PATH="$HOME/.local/bin:$PATH" - ` +* If you face issues running Poetry even after installing it during the build process, you may need to add its binary path to your environment: +```sh +export PATH="$HOME/.local/bin:$PATH" +``` + +* If `make build` stops on an error like this: + +```sh +ModuleNotFoundError: No module named +``` + +This could be an issue with Poetry's cache. +Try running these two commands one after the other: + +```sh +rm -r ~/.cache/pypoetry +make build +``` + +--- ### NoneType object has no attribute 'request' If you are experiencing issues related to networking, such as `NoneType object has no attribute 'request'` when executing `make run`, you may need to configure your WSL2 networking settings. Follow these steps: -- Open or create the `.wslconfig` file located at `C:\Users\%username%\.wslconfig` on your Windows host machine. -- Add the following configuration to the `.wslconfig` file: +* Open or create the `.wslconfig` file located at `C:\Users\%username%\.wslconfig` on your Windows host machine. +* Add the following configuration to the `.wslconfig` file: -``` +```sh [wsl2] networkingMode=mirrored localhostForwarding=true ``` -- Save the `.wslconfig` file. -- Restart WSL2 completely by exiting any running WSL2 instances and executing the command `wsl --shutdown` in your command prompt or terminal. 
-- After restarting WSL, attempt to execute `make run` again. The networking issue should be resolved. +* Save the `.wslconfig` file. +* Restart WSL2 completely by exiting any running WSL2 instances and executing the command `wsl --shutdown` in your command prompt or terminal. +* After restarting WSL, attempt to execute `make run` again. +The networking issue should be resolved. \ No newline at end of file diff --git a/docs/package-lock.json b/docs/package-lock.json index 135732840b1d..a93b4cd54955 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -8,24 +8,22 @@ "name": "docs", "version": "0.0.0", "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/preset-classic": "3.2.1", + "@docusaurus/core": "^3.4.0", + "@docusaurus/plugin-content-pages": "^3.4.0", + "@docusaurus/preset-classic": "^3.4.0", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "prism-react-renderer": "^2.3.0", - "react": "^18.0.0", - "react-dom": "^18.0.0", + "react": "^18.3.1", + "react-dom": "^18.3.1", "react-icons": "^5.2.1", "react-use": "^17.5.0" }, "devDependencies": { - "@docusaurus/module-type-aliases": "3.2.1", - "@docusaurus/tsconfig": "3.2.1", - "@docusaurus/types": "3.2.1", - "autoprefixer": "^10.4.19", - "postcss": "^8.4.38", - "tailwindcss": "^3.4.3", - "typescript": "~5.2.2" + "@docusaurus/module-type-aliases": "^3.4.0", + "@docusaurus/tsconfig": "^3.4.0", + "@docusaurus/types": "^3.4.0", + "typescript": "~5.5.3" }, "engines": { "node": ">=18.0" @@ -210,18 +208,6 @@ "@algolia/requester-common": "4.23.3" } }, - "node_modules/@alloc/quick-lru": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", - "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/@ampproject/remapping": { "version": "2.3.0", "resolved": 
"https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", @@ -502,9 +488,9 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.24.0", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.0.tgz", - "integrity": "sha512-9cUznXMG0+FxRuJfvL82QlTqIzhVW9sL0KjMPHhAOOvpQGL8QtdxnBKILjBqxlHyliz0yCa1G903ZXI/FuHy2w==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.7.tgz", + "integrity": "sha512-Rq76wjt7yz9AAc1KnlRKNAi/dMSVWgDRx43FHoJEbcYU6xOWaE2dVPwcdTukJrjxS65GITyfbvEYHvkirZ6uEg==", "engines": { "node": ">=6.9.0" } @@ -1594,11 +1580,11 @@ } }, "node_modules/@babel/plugin-transform-react-constant-elements": { - "version": "7.24.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.24.1.tgz", - "integrity": "sha512-QXp1U9x0R7tkiGB0FOk8o74jhnap0FlZ5gNkRIWdG3eP+SvMFg118e1zaWewDzgABb106QSKpVsD3Wgd8t6ifA==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.24.7.tgz", + "integrity": "sha512-7LidzZfUXyfZ8/buRW6qIIHBY8wAZ1OrY9c/wTr8YhZ6vMPo+Uc/CVFLYY1spZrEQlD4w5u8wjqk5NQ3OVqQKA==", "dependencies": { - "@babel/helper-plugin-utils": "^7.24.0" + "@babel/helper-plugin-utils": "^7.24.7" }, "engines": { "node": ">=6.9.0" @@ -2151,9 +2137,9 @@ } }, "node_modules/@docusaurus/core": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.2.1.tgz", - "integrity": "sha512-ZeMAqNvy0eBv2dThEeMuNzzuu+4thqMQakhxsgT5s02A8LqRcdkg+rbcnuNqUIpekQ4GRx3+M5nj0ODJhBXo9w==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.4.0.tgz", + "integrity": "sha512-g+0wwmN2UJsBqy2fQRQ6fhXruoEa62JDeEa5d8IdTJlMoaDaEDfHh7WjwGRn4opuTQWpjAwP/fbcgyHKlE+64w==", "dependencies": { "@babel/core": "^7.23.3", 
"@babel/generator": "^7.23.3", @@ -2165,14 +2151,12 @@ "@babel/runtime": "^7.22.6", "@babel/runtime-corejs3": "^7.22.6", "@babel/traverse": "^7.22.8", - "@docusaurus/cssnano-preset": "3.2.1", - "@docusaurus/logger": "3.2.1", - "@docusaurus/mdx-loader": "3.2.1", - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-common": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", - "@svgr/webpack": "^6.5.1", + "@docusaurus/cssnano-preset": "3.4.0", + "@docusaurus/logger": "3.4.0", + "@docusaurus/mdx-loader": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-common": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "autoprefixer": "^10.4.14", "babel-loader": "^9.1.3", "babel-plugin-dynamic-import-node": "^2.3.3", @@ -2186,8 +2170,8 @@ "copy-webpack-plugin": "^11.0.0", "core-js": "^3.31.1", "css-loader": "^6.8.1", - "css-minimizer-webpack-plugin": "^4.2.2", - "cssnano": "^5.1.15", + "css-minimizer-webpack-plugin": "^5.0.1", + "cssnano": "^6.1.2", "del": "^6.1.1", "detect-port": "^1.5.1", "escape-html": "^1.0.3", @@ -2207,7 +2191,7 @@ "prompts": "^2.4.2", "react-dev-utils": "^12.0.1", "react-helmet-async": "^1.3.0", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0", "react-loadable-ssr-addon-v5-slorber": "^1.0.1", "react-router": "^5.3.4", "react-router-config": "^5.1.1", @@ -2238,13 +2222,13 @@ } }, "node_modules/@docusaurus/cssnano-preset": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.2.1.tgz", - "integrity": "sha512-wTL9KuSSbMJjKrfu385HZEzAoamUsbKqwscAQByZw4k6Ja/RWpqgVvt/CbAC+aYEH6inLzOt+MjuRwMOrD3VBA==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.4.0.tgz", + "integrity": "sha512-qwLFSz6v/pZHy/UP32IrprmH5ORce86BGtN0eBtG75PpzQJAzp9gefspox+s8IEOr0oZKuQ/nhzZ3xwyc3jYJQ==", "dependencies": { - "cssnano-preset-advanced": 
"^5.3.10", - "postcss": "^8.4.26", - "postcss-sort-media-queries": "^4.4.1", + "cssnano-preset-advanced": "^6.1.2", + "postcss": "^8.4.38", + "postcss-sort-media-queries": "^5.2.0", "tslib": "^2.6.0" }, "engines": { @@ -2252,9 +2236,9 @@ } }, "node_modules/@docusaurus/logger": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.2.1.tgz", - "integrity": "sha512-0voOKJCn9RaM3np6soqEfo7SsVvf2C+CDTWhW+H/1AyBhybASpExtDEz+7ECck9TwPzFQ5tt+I3zVugUJbJWDg==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.4.0.tgz", + "integrity": "sha512-bZwkX+9SJ8lB9kVRkXw+xvHYSMGG4bpYHKGXeXFvyVc79NMeeBSGgzd4TQLHH+DYeOJoCdl8flrFJVxlZ0wo/Q==", "dependencies": { "chalk": "^4.1.2", "tslib": "^2.6.0" @@ -2264,13 +2248,13 @@ } }, "node_modules/@docusaurus/mdx-loader": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.2.1.tgz", - "integrity": "sha512-Fs8tXhXKZjNkdGaOy1xSLXSwfjCMT73J3Zfrju2U16uGedRFRjgK0ojpK5tiC7TnunsL3tOFgp1BSMBRflX9gw==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.4.0.tgz", + "integrity": "sha512-kSSbrrk4nTjf4d+wtBA9H+FGauf2gCax89kV8SUSJu3qaTdSIKdWERlngsiHaCFgZ7laTJ8a67UFf+xlFPtuTw==", "dependencies": { - "@docusaurus/logger": "3.2.1", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "@docusaurus/logger": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "@mdx-js/mdx": "^3.0.0", "@slorber/remark-comment": "^1.0.0", "escape-html": "^1.0.3", @@ -2302,18 +2286,17 @@ } }, "node_modules/@docusaurus/module-type-aliases": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.2.1.tgz", - "integrity": "sha512-FyViV5TqhL1vsM7eh29nJ5NtbRE6Ra6LP1PDcPvhwPSlA7eiWGRKAn3jWwMUcmjkos5SYY+sr0/feCdbM3eQHQ==", + "version": "3.4.0", + "resolved": 
"https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.4.0.tgz", + "integrity": "sha512-A1AyS8WF5Bkjnb8s+guTDuYmUiwJzNrtchebBHpc0gz0PyHJNMaybUlSrmJjHVcGrya0LKI4YcR3lBDQfXRYLw==", "dependencies": { - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "3.2.1", + "@docusaurus/types": "3.4.0", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", "@types/react-router-dom": "*", "react-helmet-async": "*", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2" + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0" }, "peerDependencies": { "react": "*", @@ -2321,17 +2304,17 @@ } }, "node_modules/@docusaurus/plugin-content-blog": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.2.1.tgz", - "integrity": "sha512-lOx0JfhlGZoZu6pEJfeEpSISZR5dQbJGGvb42IP13G5YThNHhG9R9uoWuo4IOimPqBC7sHThdLA3VLevk61Fsw==", - "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/logger": "3.2.1", - "@docusaurus/mdx-loader": "3.2.1", - "@docusaurus/types": "3.2.1", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-common": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.4.0.tgz", + "integrity": "sha512-vv6ZAj78ibR5Jh7XBUT4ndIjmlAxkijM3Sx5MAAzC1gyv0vupDQNhzuFg1USQmQVj3P5I6bquk12etPV3LJ+Xw==", + "dependencies": { + "@docusaurus/core": "3.4.0", + "@docusaurus/logger": "3.4.0", + "@docusaurus/mdx-loader": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-common": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^11.1.1", @@ -2352,18 +2335,18 @@ } }, "node_modules/@docusaurus/plugin-content-docs": { - "version": "3.2.1", - "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.2.1.tgz", - "integrity": "sha512-GHe5b/lCskAR8QVbfWAfPAApvRZgqk7FN3sOHgjCtjzQACZxkHmq6QqyqZ8Jp45V7lVck4wt2Xw2IzBJ7Cz3bA==", - "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/logger": "3.2.1", - "@docusaurus/mdx-loader": "3.2.1", - "@docusaurus/module-type-aliases": "3.2.1", - "@docusaurus/types": "3.2.1", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-common": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.4.0.tgz", + "integrity": "sha512-HkUCZffhBo7ocYheD9oZvMcDloRnGhBMOZRyVcAQRFmZPmNqSyISlXA1tQCIxW+r478fty97XXAGjNYzBjpCsg==", + "dependencies": { + "@docusaurus/core": "3.4.0", + "@docusaurus/logger": "3.4.0", + "@docusaurus/mdx-loader": "3.4.0", + "@docusaurus/module-type-aliases": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-common": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "@types/react-router-config": "^5.0.7", "combine-promises": "^1.1.0", "fs-extra": "^11.1.1", @@ -2382,15 +2365,15 @@ } }, "node_modules/@docusaurus/plugin-content-pages": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.2.1.tgz", - "integrity": "sha512-TOqVfMVTAHqWNEGM94Drz+PUpHDbwFy6ucHFgyTx9zJY7wPNSG5EN+rd/mU7OvAi26qpOn2o9xTdUmb28QLjEQ==", - "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/mdx-loader": "3.2.1", - "@docusaurus/types": "3.2.1", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.4.0.tgz", + "integrity": "sha512-h2+VN/0JjpR8fIkDEAoadNjfR3oLzB+v1qSXbIAKjQ46JAHx3X22n9nqS+BWSQnTnp1AjkjSvZyJMekmcwxzxg==", + "dependencies": { + "@docusaurus/core": "3.4.0", + 
"@docusaurus/mdx-loader": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "fs-extra": "^11.1.1", "tslib": "^2.6.0", "webpack": "^5.88.1" @@ -2404,13 +2387,13 @@ } }, "node_modules/@docusaurus/plugin-debug": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.2.1.tgz", - "integrity": "sha512-AMKq8NuUKf2sRpN1m/sIbqbRbnmk+rSA+8mNU1LNxEl9BW9F/Gng8m9HKlzeyMPrf5XidzS1jqkuTLDJ6KIrFw==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.4.0.tgz", + "integrity": "sha512-uV7FDUNXGyDSD3PwUaf5YijX91T5/H9SX4ErEcshzwgzWwBtK37nUWPU3ZLJfeTavX3fycTOqk9TglpOLaWkCg==", "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/types": "3.2.1", - "@docusaurus/utils": "3.2.1", + "@docusaurus/core": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils": "3.4.0", "fs-extra": "^11.1.1", "react-json-view-lite": "^1.2.0", "tslib": "^2.6.0" @@ -2424,13 +2407,13 @@ } }, "node_modules/@docusaurus/plugin-google-analytics": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.2.1.tgz", - "integrity": "sha512-/rJ+9u+Px0eTCiF4TNcNtj3kHf8cp6K1HCwOTdbsSlz6Xn21syZYcy+f1VM9wF6HrvUkXUcbM5TDCvg2IRL6bQ==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.4.0.tgz", + "integrity": "sha512-mCArluxEGi3cmYHqsgpGGt3IyLCrFBxPsxNZ56Mpur0xSlInnIHoeLDH7FvVVcPJRPSQ9/MfRqLsainRw+BojA==", "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/types": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "@docusaurus/core": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "tslib": "^2.6.0" }, "engines": { @@ -2442,13 +2425,13 @@ } }, "node_modules/@docusaurus/plugin-google-gtag": { - "version": "3.2.1", - "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.2.1.tgz", - "integrity": "sha512-XtuJnlMvYfppeVdUyKiDIJAa/gTJKCQU92z8CLZZ9ibJdgVjFOLS10s0hIC0eL5z0U2u2loJz2rZ63HOkNHbBA==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.4.0.tgz", + "integrity": "sha512-Dsgg6PLAqzZw5wZ4QjUYc8Z2KqJqXxHxq3vIoyoBWiLEEfigIs7wHR+oiWUQy3Zk9MIk6JTYj7tMoQU0Jm3nqA==", "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/types": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "@docusaurus/core": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "@types/gtag.js": "^0.0.12", "tslib": "^2.6.0" }, @@ -2461,13 +2444,13 @@ } }, "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.2.1.tgz", - "integrity": "sha512-wiS/kE0Ny5pnjTxVCs8ljRnkL1RVMj59t6jmSsgEX7piDOoaXSMIUaoIt9ogS/v132uO0xEsxHstkRUZHQyPcQ==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.4.0.tgz", + "integrity": "sha512-O9tX1BTwxIhgXpOLpFDueYA9DWk69WCbDRrjYoMQtFHSkTyE7RhNgyjSPREUWJb9i+YUg3OrsvrBYRl64FCPCQ==", "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/types": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "@docusaurus/core": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "tslib": "^2.6.0" }, "engines": { @@ -2479,16 +2462,16 @@ } }, "node_modules/@docusaurus/plugin-sitemap": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.2.1.tgz", - "integrity": "sha512-uWZ7AxzdeaQSTCwD2yZtOiEm9zyKU+wqCmi/Sf25kQQqqFSBZUStXfaQ8OHP9cecnw893ZpZ811rPhB/wfujJw==", - "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/logger": "3.2.1", - 
"@docusaurus/types": "3.2.1", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-common": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.4.0.tgz", + "integrity": "sha512-+0VDvx9SmNrFNgwPoeoCha+tRoAjopwT0+pYO1xAbyLcewXSemq+eLxEa46Q1/aoOaJQ0qqHELuQM7iS2gp33Q==", + "dependencies": { + "@docusaurus/core": "3.4.0", + "@docusaurus/logger": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-common": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "fs-extra": "^11.1.1", "sitemap": "^7.1.1", "tslib": "^2.6.0" @@ -2502,23 +2485,23 @@ } }, "node_modules/@docusaurus/preset-classic": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.2.1.tgz", - "integrity": "sha512-E3OHSmttpEBcSMhfPBq3EJMBxZBM01W1rnaCUTXy9EHvkmB5AwgTfW1PwGAybPAX579ntE03R+2zmXdizWfKnQ==", - "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/plugin-content-blog": "3.2.1", - "@docusaurus/plugin-content-docs": "3.2.1", - "@docusaurus/plugin-content-pages": "3.2.1", - "@docusaurus/plugin-debug": "3.2.1", - "@docusaurus/plugin-google-analytics": "3.2.1", - "@docusaurus/plugin-google-gtag": "3.2.1", - "@docusaurus/plugin-google-tag-manager": "3.2.1", - "@docusaurus/plugin-sitemap": "3.2.1", - "@docusaurus/theme-classic": "3.2.1", - "@docusaurus/theme-common": "3.2.1", - "@docusaurus/theme-search-algolia": "3.2.1", - "@docusaurus/types": "3.2.1" + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.4.0.tgz", + "integrity": "sha512-Ohj6KB7siKqZaQhNJVMBBUzT3Nnp6eTKqO+FXO3qu/n1hJl3YLwVKTWBg28LF7MWrKu46UuYavwMRxud0VyqHg==", + "dependencies": { + "@docusaurus/core": "3.4.0", + "@docusaurus/plugin-content-blog": "3.4.0", + "@docusaurus/plugin-content-docs": "3.4.0", + "@docusaurus/plugin-content-pages": "3.4.0", + 
"@docusaurus/plugin-debug": "3.4.0", + "@docusaurus/plugin-google-analytics": "3.4.0", + "@docusaurus/plugin-google-gtag": "3.4.0", + "@docusaurus/plugin-google-tag-manager": "3.4.0", + "@docusaurus/plugin-sitemap": "3.4.0", + "@docusaurus/theme-classic": "3.4.0", + "@docusaurus/theme-common": "3.4.0", + "@docusaurus/theme-search-algolia": "3.4.0", + "@docusaurus/types": "3.4.0" }, "engines": { "node": ">=18.0" @@ -2528,35 +2511,23 @@ "react-dom": "^18.0.0" } }, - "node_modules/@docusaurus/react-loadable": { - "version": "5.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", - "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", - "dependencies": { - "@types/react": "*", - "prop-types": "^15.6.2" - }, - "peerDependencies": { - "react": "*" - } - }, "node_modules/@docusaurus/theme-classic": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.2.1.tgz", - "integrity": "sha512-+vSbnQyoWjc6vRZi4vJO2dBU02wqzynsai15KK+FANZudrYaBHtkbLZAQhgmxzBGVpxzi87gRohlMm+5D8f4tA==", - "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/mdx-loader": "3.2.1", - "@docusaurus/module-type-aliases": "3.2.1", - "@docusaurus/plugin-content-blog": "3.2.1", - "@docusaurus/plugin-content-docs": "3.2.1", - "@docusaurus/plugin-content-pages": "3.2.1", - "@docusaurus/theme-common": "3.2.1", - "@docusaurus/theme-translations": "3.2.1", - "@docusaurus/types": "3.2.1", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-common": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.4.0.tgz", + "integrity": "sha512-0IPtmxsBYv2adr1GnZRdMkEQt1YW6tpzrUPj02YxNpvJ5+ju4E13J5tB4nfdaen/tfR1hmpSPlTFPvTf4kwy8Q==", + "dependencies": { + "@docusaurus/core": "3.4.0", + "@docusaurus/mdx-loader": "3.4.0", + 
"@docusaurus/module-type-aliases": "3.4.0", + "@docusaurus/plugin-content-blog": "3.4.0", + "@docusaurus/plugin-content-docs": "3.4.0", + "@docusaurus/plugin-content-pages": "3.4.0", + "@docusaurus/theme-common": "3.4.0", + "@docusaurus/theme-translations": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-common": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "copy-text-to-clipboard": "^3.2.0", @@ -2580,17 +2551,17 @@ } }, "node_modules/@docusaurus/theme-common": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.2.1.tgz", - "integrity": "sha512-d+adiD7L9xv6EvfaAwUqdKf4orsM3jqgeqAM+HAjgL/Ux0GkVVnfKr+tsoe+4ow4rHe6NUt+nkkW8/K8dKdilA==", - "dependencies": { - "@docusaurus/mdx-loader": "3.2.1", - "@docusaurus/module-type-aliases": "3.2.1", - "@docusaurus/plugin-content-blog": "3.2.1", - "@docusaurus/plugin-content-docs": "3.2.1", - "@docusaurus/plugin-content-pages": "3.2.1", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-common": "3.2.1", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.4.0.tgz", + "integrity": "sha512-0A27alXuv7ZdCg28oPE8nH/Iz73/IUejVaCazqu9elS4ypjiLhK3KfzdSQBnL/g7YfHSlymZKdiOHEo8fJ0qMA==", + "dependencies": { + "@docusaurus/mdx-loader": "3.4.0", + "@docusaurus/module-type-aliases": "3.4.0", + "@docusaurus/plugin-content-blog": "3.4.0", + "@docusaurus/plugin-content-docs": "3.4.0", + "@docusaurus/plugin-content-pages": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-common": "3.4.0", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2609,18 +2580,18 @@ } }, "node_modules/@docusaurus/theme-search-algolia": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.2.1.tgz", - "integrity": 
"sha512-bzhCrpyXBXzeydNUH83II2akvFEGfhsNTPPWsk5N7e+odgQCQwoHhcF+2qILbQXjaoZ6B3c48hrvkyCpeyqGHw==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.4.0.tgz", + "integrity": "sha512-aiHFx7OCw4Wck1z6IoShVdUWIjntC8FHCw9c5dR8r3q4Ynh+zkS8y2eFFunN/DL6RXPzpnvKCg3vhLQYJDmT9Q==", "dependencies": { "@docsearch/react": "^3.5.2", - "@docusaurus/core": "3.2.1", - "@docusaurus/logger": "3.2.1", - "@docusaurus/plugin-content-docs": "3.2.1", - "@docusaurus/theme-common": "3.2.1", - "@docusaurus/theme-translations": "3.2.1", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-validation": "3.2.1", + "@docusaurus/core": "3.4.0", + "@docusaurus/logger": "3.4.0", + "@docusaurus/plugin-content-docs": "3.4.0", + "@docusaurus/theme-common": "3.4.0", + "@docusaurus/theme-translations": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", "algoliasearch": "^4.18.0", "algoliasearch-helper": "^3.13.3", "clsx": "^2.0.0", @@ -2639,9 +2610,9 @@ } }, "node_modules/@docusaurus/theme-translations": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.2.1.tgz", - "integrity": "sha512-jAUMkIkFfY+OAhJhv6mV8zlwY6J4AQxJPTgLdR2l+Otof9+QdJjHNh/ifVEu9q0lp3oSPlJj9l05AaP7Ref+cg==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.4.0.tgz", + "integrity": "sha512-zSxCSpmQCCdQU5Q4CnX/ID8CSUUI3fvmq4hU/GNP/XoAWtXo9SAVnM3TzpU8Gb//H3WCsT8mJcTfyOk3d9ftNg==", "dependencies": { "fs-extra": "^11.1.1", "tslib": "^2.6.0" @@ -2651,15 +2622,15 @@ } }, "node_modules/@docusaurus/tsconfig": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.2.1.tgz", - "integrity": "sha512-+biUwtsYW3oChLxYezzA+NIgS3Q9KDRl7add/YT54RXs9Q4rKInebxdHdG6JFs5BaTg45gyjDu0rvNVcGeHODg==", + "version": "3.4.0", + "resolved": 
"https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.4.0.tgz", + "integrity": "sha512-0qENiJ+TRaeTzcg4olrnh0BQ7eCxTgbYWBnWUeQDc84UYkt/T3pDNnm3SiQkqPb+YQ1qtYFlC0RriAElclo8Dg==", "dev": true }, "node_modules/@docusaurus/types": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.2.1.tgz", - "integrity": "sha512-n/toxBzL2oxTtRTOFiGKsHypzn/Pm+sXyw+VSk1UbqbXQiHOwHwts55bpKwbcUgA530Is6kix3ELiFOv9GAMfw==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.4.0.tgz", + "integrity": "sha512-4jcDO8kXi5Cf9TcyikB/yKmz14f2RZ2qTRerbHAsS+5InE9ZgSLBNLsewtFTcTOXSVcbU3FoGOzcNWAmU1TR0A==", "dependencies": { "@mdx-js/mdx": "^3.0.0", "@types/history": "^4.7.11", @@ -2677,13 +2648,13 @@ } }, "node_modules/@docusaurus/utils": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.2.1.tgz", - "integrity": "sha512-DPkIS/EPc+pGAV798PUXgNzJFM3HJouoQXgr0KDZuJVz1EkWbDLOcQwLIz8Qx7liI9ddfkN/TXTRQdsTPZNakw==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.4.0.tgz", + "integrity": "sha512-fRwnu3L3nnWaXOgs88BVBmG1yGjcQqZNHG+vInhEa2Sz2oQB+ZjbEMO5Rh9ePFpZ0YDiDUhpaVjwmS+AU2F14g==", "dependencies": { - "@docusaurus/logger": "3.2.1", - "@docusaurus/utils-common": "3.2.1", - "@svgr/webpack": "^6.5.1", + "@docusaurus/logger": "3.4.0", + "@docusaurus/utils-common": "3.4.0", + "@svgr/webpack": "^8.1.0", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", "fs-extra": "^11.1.1", @@ -2699,6 +2670,7 @@ "shelljs": "^0.8.5", "tslib": "^2.6.0", "url-loader": "^4.1.1", + "utility-types": "^3.10.0", "webpack": "^5.88.1" }, "engines": { @@ -2714,9 +2686,9 @@ } }, "node_modules/@docusaurus/utils-common": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.2.1.tgz", - "integrity": "sha512-N5vadULnRLiqX2QfTjVEU3u5vo6RG2EZTdyXvJdzDOdrLCGIZAfnf/VkssinFZ922sVfaFfQ4FnStdhn5TWdVg==", + 
"version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.4.0.tgz", + "integrity": "sha512-NVx54Wr4rCEKsjOH5QEVvxIqVvm+9kh7q8aYTU5WzUU9/Hctd6aTrcZ3G0Id4zYJ+AeaG5K5qHA4CY5Kcm2iyQ==", "dependencies": { "tslib": "^2.6.0" }, @@ -2733,15 +2705,17 @@ } }, "node_modules/@docusaurus/utils-validation": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.2.1.tgz", - "integrity": "sha512-+x7IR9hNMXi62L1YAglwd0s95fR7+EtirjTxSN4kahYRWGqOi3jlQl1EV0az/yTEvKbxVvOPcdYicGu9dk4LJw==", - "dependencies": { - "@docusaurus/logger": "3.2.1", - "@docusaurus/utils": "3.2.1", - "@docusaurus/utils-common": "3.2.1", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.4.0.tgz", + "integrity": "sha512-hYQ9fM+AXYVTWxJOT1EuNaRnrR2WGpRdLDQG07O8UOpsvCPWUVOeo26Rbm0JWY2sGLfzAb+tvJ62yF+8F+TV0g==", + "dependencies": { + "@docusaurus/logger": "3.4.0", + "@docusaurus/utils": "3.4.0", + "@docusaurus/utils-common": "3.4.0", + "fs-extra": "^11.2.0", "joi": "^17.9.2", "js-yaml": "^4.1.0", + "lodash": "^4.17.21", "tslib": "^2.6.0" }, "engines": { @@ -2761,50 +2735,6 @@ "@hapi/hoek": "^9.0.0" } }, - "node_modules/@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "dev": true, - "dependencies": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@isaacs/cliui/node_modules/ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": 
"sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/@isaacs/cliui/node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dev": true, - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, "node_modules/@jest/schemas": { "version": "29.6.3", "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", @@ -2971,16 +2901,6 @@ "node": ">= 8" } }, - "node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", - "dev": true, - "optional": true, - "engines": { - "node": ">=14" - } - }, "node_modules/@pnpm/config.env-replace": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", @@ -3068,11 +2988,11 @@ } }, "node_modules/@svgr/babel-plugin-add-jsx-attribute": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", - "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==", "engines": { - "node": ">=10" + "node": 
">=14" }, "funding": { "type": "github", @@ -3113,11 +3033,11 @@ } }, "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz", - "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz", + "integrity": "sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==", "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", @@ -3128,11 +3048,11 @@ } }, "node_modules/@svgr/babel-plugin-svg-dynamic-title": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz", - "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz", + "integrity": "sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==", "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", @@ -3143,11 +3063,11 @@ } }, "node_modules/@svgr/babel-plugin-svg-em-dimensions": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz", - "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz", + "integrity": 
"sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==", "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", @@ -3158,11 +3078,11 @@ } }, "node_modules/@svgr/babel-plugin-transform-react-native-svg": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz", - "integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz", + "integrity": "sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==", "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", @@ -3173,9 +3093,9 @@ } }, "node_modules/@svgr/babel-plugin-transform-svg-component": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz", - "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz", + "integrity": "sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==", "engines": { "node": ">=12" }, @@ -3188,21 +3108,21 @@ } }, "node_modules/@svgr/babel-preset": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz", - "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-8.1.0.tgz", + "integrity": 
"sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==", "dependencies": { - "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", - "@svgr/babel-plugin-remove-jsx-attribute": "*", - "@svgr/babel-plugin-remove-jsx-empty-expression": "*", - "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", - "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", - "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", - "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", - "@svgr/babel-plugin-transform-svg-component": "^6.5.1" + "@svgr/babel-plugin-add-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-empty-expression": "8.0.0", + "@svgr/babel-plugin-replace-jsx-attribute-value": "8.0.0", + "@svgr/babel-plugin-svg-dynamic-title": "8.0.0", + "@svgr/babel-plugin-svg-em-dimensions": "8.0.0", + "@svgr/babel-plugin-transform-react-native-svg": "8.1.0", + "@svgr/babel-plugin-transform-svg-component": "8.0.0" }, "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", @@ -3213,18 +3133,18 @@ } }, "node_modules/@svgr/core": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz", - "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", + "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", "dependencies": { - "@babel/core": "^7.19.6", - "@svgr/babel-preset": "^6.5.1", - "@svgr/plugin-jsx": "^6.5.1", + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", "camelcase": "^6.2.0", - "cosmiconfig": "^7.0.1" + "cosmiconfig": "^8.1.3", + "snake-case": "^3.0.4" }, "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", @@ -3232,15 +3152,15 @@ } }, "node_modules/@svgr/hast-util-to-babel-ast": { - 
"version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz", - "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz", + "integrity": "sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==", "dependencies": { - "@babel/types": "^7.20.0", + "@babel/types": "^7.21.3", "entities": "^4.4.0" }, "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", @@ -3248,37 +3168,37 @@ } }, "node_modules/@svgr/plugin-jsx": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz", - "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz", + "integrity": "sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==", "dependencies": { - "@babel/core": "^7.19.6", - "@svgr/babel-preset": "^6.5.1", - "@svgr/hast-util-to-babel-ast": "^6.5.1", + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "@svgr/hast-util-to-babel-ast": "8.0.0", "svg-parser": "^2.0.4" }, "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", "url": "https://github.com/sponsors/gregberge" }, "peerDependencies": { - "@svgr/core": "^6.0.0" + "@svgr/core": "*" } }, "node_modules/@svgr/plugin-svgo": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz", - "integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-8.1.0.tgz", + 
"integrity": "sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA==", "dependencies": { - "cosmiconfig": "^7.0.1", - "deepmerge": "^4.2.2", - "svgo": "^2.8.0" + "cosmiconfig": "^8.1.3", + "deepmerge": "^4.3.1", + "svgo": "^3.0.2" }, "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", @@ -3289,21 +3209,21 @@ } }, "node_modules/@svgr/webpack": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz", - "integrity": "sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-8.1.0.tgz", + "integrity": "sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA==", "dependencies": { - "@babel/core": "^7.19.6", - "@babel/plugin-transform-react-constant-elements": "^7.18.12", - "@babel/preset-env": "^7.19.4", + "@babel/core": "^7.21.3", + "@babel/plugin-transform-react-constant-elements": "^7.21.3", + "@babel/preset-env": "^7.20.2", "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.18.6", - "@svgr/core": "^6.5.1", - "@svgr/plugin-jsx": "^6.5.1", - "@svgr/plugin-svgo": "^6.5.1" + "@babel/preset-typescript": "^7.21.0", + "@svgr/core": "8.1.0", + "@svgr/plugin-jsx": "8.1.0", + "@svgr/plugin-svgo": "8.1.0" }, "engines": { - "node": ">=10" + "node": ">=14" }, "funding": { "type": "github", @@ -3505,9 +3425,9 @@ "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" }, "node_modules/@types/mdast": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.3.tgz", - "integrity": "sha512-LsjtqsyF+d2/yFOYaN22dHZI1Cpwkrj+g06G8+qtUKlhovPW89YhqSnfKtMbkgmEtYpH2gydRNULd6y8mciAFg==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": 
"sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", "dependencies": { "@types/unist": "*" } @@ -3982,9 +3902,9 @@ } }, "node_modules/algoliasearch-helper": { - "version": "3.18.0", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.18.0.tgz", - "integrity": "sha512-ZXvA8r6VG46V343jnIE7Tei8Xr0/9N8YhD27joC0BKxeogQyvNu7O37i510wA7FnrDjoa/tFhK90WUaBlkaqnw==", + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.21.0.tgz", + "integrity": "sha512-hjVOrL15I3Y3K8xG0icwG1/tWE+MocqBrhW6uVBWpU+/kVEMK0BnM2xdssj6mZM61eJ4iRxHR0djEI3ENOpR8w==", "dependencies": { "@algolia/events": "^4.0.1" }, @@ -4051,12 +3971,6 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/any-promise": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", - "dev": true - }, "node_modules/anymatch": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", @@ -4339,11 +4253,11 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" @@ -4475,15 +4389,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/camelcase-css": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", - "integrity": 
"sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, "node_modules/caniuse-api": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", @@ -5072,18 +4977,28 @@ "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" }, "node_modules/cosmiconfig": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", - "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.2.1", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" }, "engines": { - "node": ">=10" + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, "node_modules/cross-spawn": { @@ -5125,11 +5040,11 @@ } }, "node_modules/css-declaration-sorter": { - "version": "6.4.1", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.4.1.tgz", - "integrity": "sha512-rtdthzxKuyq6IzqX6jEcIzQF/YqccluefyCYheovBOLhFT/drQA9zj/UbRAa9J7C0o6EG6u3E6g+vKkay7/k3g==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-7.2.0.tgz", + "integrity": "sha512-h70rUM+3PNFuaBDTLe8wF/cdWu+dOZmb7pJt8Z2sedYbAcQVQV/tEchueg3GWxwqS0cxtbxmaHEdkNACqcvsow==", "engines": { - "node": "^10 || ^12 
|| >=14" + "node": "^14 || ^16 || >=18" }, "peerDependencies": { "postcss": "^8.0.9" @@ -5178,16 +5093,16 @@ } }, "node_modules/css-minimizer-webpack-plugin": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz", - "integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-5.0.1.tgz", + "integrity": "sha512-3caImjKFQkS+ws1TGcFn0V1HyDJFq1Euy589JlD6/3rV2kj+w7r5G9WDMgSHvpvXHNZ2calVypZWuEDQd9wfLg==", "dependencies": { - "cssnano": "^5.1.8", - "jest-worker": "^29.1.2", - "postcss": "^8.4.17", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0", - "source-map": "^0.6.1" + "@jridgewell/trace-mapping": "^0.3.18", + "cssnano": "^6.0.1", + "jest-worker": "^29.4.3", + "postcss": "^8.4.24", + "schema-utils": "^4.0.1", + "serialize-javascript": "^6.0.1" }, "engines": { "node": ">= 14.15.0" @@ -5220,14 +5135,6 @@ } } }, - "node_modules/css-minimizer-webpack-plugin/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/css-select": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", @@ -5286,108 +5193,128 @@ } }, "node_modules/cssnano": { - "version": "5.1.15", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.15.tgz", - "integrity": "sha512-j+BKgDcLDQA+eDifLx0EO4XSA56b7uut3BQFH+wbSaSTuGLuiyTa/wbRYthUXX8LC9mLg+WWKe8h+qJuwTAbHw==", + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-6.1.2.tgz", + "integrity": 
"sha512-rYk5UeX7VAM/u0lNqewCdasdtPK81CgX8wJFLEIXHbV2oldWRgJAsZrdhRXkV1NJzA2g850KiFm9mMU2HxNxMA==", "dependencies": { - "cssnano-preset-default": "^5.2.14", - "lilconfig": "^2.0.3", - "yaml": "^1.10.2" + "cssnano-preset-default": "^6.1.2", + "lilconfig": "^3.1.1" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/cssnano" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/cssnano-preset-advanced": { - "version": "5.3.10", - "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz", - "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==", + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-6.1.2.tgz", + "integrity": "sha512-Nhao7eD8ph2DoHolEzQs5CfRpiEP0xa1HBdnFZ82kvqdmbwVBUr2r1QuQ4t1pi+D1ZpqpcO4T+wy/7RxzJ/WPQ==", "dependencies": { - "autoprefixer": "^10.4.12", - "cssnano-preset-default": "^5.2.14", - "postcss-discard-unused": "^5.1.0", - "postcss-merge-idents": "^5.1.1", - "postcss-reduce-idents": "^5.2.0", - "postcss-zindex": "^5.1.0" + "autoprefixer": "^10.4.19", + "browserslist": "^4.23.0", + "cssnano-preset-default": "^6.1.2", + "postcss-discard-unused": "^6.0.5", + "postcss-merge-idents": "^6.0.3", + "postcss-reduce-idents": "^6.0.3", + "postcss-zindex": "^6.0.2" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/cssnano-preset-default": { - "version": "5.2.14", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", - "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", - "dependencies": { - "css-declaration-sorter": "^6.3.1", - 
"cssnano-utils": "^3.1.0", - "postcss-calc": "^8.2.3", - "postcss-colormin": "^5.3.1", - "postcss-convert-values": "^5.1.3", - "postcss-discard-comments": "^5.1.2", - "postcss-discard-duplicates": "^5.1.0", - "postcss-discard-empty": "^5.1.1", - "postcss-discard-overridden": "^5.1.0", - "postcss-merge-longhand": "^5.1.7", - "postcss-merge-rules": "^5.1.4", - "postcss-minify-font-values": "^5.1.0", - "postcss-minify-gradients": "^5.1.1", - "postcss-minify-params": "^5.1.4", - "postcss-minify-selectors": "^5.2.1", - "postcss-normalize-charset": "^5.1.0", - "postcss-normalize-display-values": "^5.1.0", - "postcss-normalize-positions": "^5.1.1", - "postcss-normalize-repeat-style": "^5.1.1", - "postcss-normalize-string": "^5.1.0", - "postcss-normalize-timing-functions": "^5.1.0", - "postcss-normalize-unicode": "^5.1.1", - "postcss-normalize-url": "^5.1.0", - "postcss-normalize-whitespace": "^5.1.1", - "postcss-ordered-values": "^5.1.3", - "postcss-reduce-initial": "^5.1.2", - "postcss-reduce-transforms": "^5.1.0", - "postcss-svgo": "^5.1.0", - "postcss-unique-selectors": "^5.1.1" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-6.1.2.tgz", + "integrity": "sha512-1C0C+eNaeN8OcHQa193aRgYexyJtU8XwbdieEjClw+J9d94E41LwT6ivKH0WT+fYwYWB0Zp3I3IZ7tI/BbUbrg==", + "dependencies": { + "browserslist": "^4.23.0", + "css-declaration-sorter": "^7.2.0", + "cssnano-utils": "^4.0.2", + "postcss-calc": "^9.0.1", + "postcss-colormin": "^6.1.0", + "postcss-convert-values": "^6.1.0", + "postcss-discard-comments": "^6.0.2", + "postcss-discard-duplicates": "^6.0.3", + "postcss-discard-empty": "^6.0.3", + "postcss-discard-overridden": "^6.0.2", + "postcss-merge-longhand": "^6.0.5", + "postcss-merge-rules": "^6.1.1", + "postcss-minify-font-values": "^6.1.0", + "postcss-minify-gradients": "^6.0.3", + "postcss-minify-params": 
"^6.1.0", + "postcss-minify-selectors": "^6.0.4", + "postcss-normalize-charset": "^6.0.2", + "postcss-normalize-display-values": "^6.0.2", + "postcss-normalize-positions": "^6.0.2", + "postcss-normalize-repeat-style": "^6.0.2", + "postcss-normalize-string": "^6.0.2", + "postcss-normalize-timing-functions": "^6.0.2", + "postcss-normalize-unicode": "^6.1.0", + "postcss-normalize-url": "^6.0.2", + "postcss-normalize-whitespace": "^6.0.2", + "postcss-ordered-values": "^6.0.2", + "postcss-reduce-initial": "^6.1.0", + "postcss-reduce-transforms": "^6.0.2", + "postcss-svgo": "^6.0.3", + "postcss-unique-selectors": "^6.0.4" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" } }, "node_modules/cssnano-utils": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz", - "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-4.0.2.tgz", + "integrity": "sha512-ZR1jHg+wZ8o4c3zqf1SIUSTIvm/9mU343FMR6Obe/unskbvpGhZOo1J6d/r8D1pzkRQYuwbcH3hToOuoA2G7oQ==", "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/csso": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", - "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", + "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", "dependencies": { - "css-tree": "^1.1.2" + "css-tree": "~2.2.0" }, "engines": { - "node": ">=8.0.0" + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + 
"node_modules/csso/node_modules/css-tree": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", + "integrity": "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", + "dependencies": { + "mdn-data": "2.0.28", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" } }, + "node_modules/csso/node_modules/mdn-data": { + "version": "2.0.28", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", + "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==" + }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -5631,12 +5558,6 @@ "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/didyoumean": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", - "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", - "dev": true - }, "node_modules/dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", @@ -5648,12 +5569,6 @@ "node": ">=8" } }, - "node_modules/dlv": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", - "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", - "dev": true - }, "node_modules/dns-packet": { "version": "5.6.1", "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", @@ -6373,9 +6288,9 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": 
"https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -6470,34 +6385,6 @@ } } }, - "node_modules/foreground-child": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", - "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", - "dev": true, - "dependencies": { - "cross-spawn": "^7.0.0", - "signal-exit": "^4.0.1" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/foreground-child/node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "dev": true, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/fork-ts-checker-webpack-plugin": { "version": "6.5.3", "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", @@ -7071,9 +6958,9 @@ } }, "node_modules/hast-util-raw": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.2.tgz", - "integrity": "sha512-PldBy71wO9Uq1kyaMch9AHIghtQvIwxBUkv823pKmkTM3oV1JxtsTNYdevMxvUHqcnOAuO65JKU2+0NOxc2ksA==", + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.4.tgz", + "integrity": "sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==", "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", @@ -7966,24 +7853,6 @@ "node": ">=0.10.0" } }, - "node_modules/jackspeak": { - "version": "2.3.6", - "resolved": 
"https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", - "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", - "dev": true, - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, "node_modules/jest-util": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", @@ -8037,9 +7906,9 @@ } }, "node_modules/joi": { - "version": "17.13.0", - "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.0.tgz", - "integrity": "sha512-9qcrTyoBmFZRNHeVP4edKqIUEgFzq7MHvTNSDuHSqkpOPtiBkgNgcmTSqmiw1kw9tdKaiddvIDv/eCJDxmqWCA==", + "version": "17.13.1", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.1.tgz", + "integrity": "sha512-vaBlIKCyo4FCUtCm7Eu4QZd/q02bWcxfUO6YSXAZOWF6gzcLBeba8kwotUdYJjDLW8Cz8RywsSOqiNJZW0mNvg==", "dependencies": { "@hapi/hoek": "^9.3.0", "@hapi/topo": "^5.1.0", @@ -8173,11 +8042,14 @@ } }, "node_modules/lilconfig": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", - "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", + "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", "engines": { - "node": ">=10" + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" } }, "node_modules/lines-and-columns": { @@ -8353,9 +8225,9 @@ } }, "node_modules/mdast-util-from-markdown": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.0.tgz", - "integrity": 
"sha512-n7MTOr/z+8NAX/wmhhDji8O3bRvPTV/U0oTCaZJkjhPSKTPhS3xufVhKGF8s1pJ7Ox4QgoIU7KHseh09S+9rTA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz", + "integrity": "sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==", "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", @@ -8635,9 +8507,9 @@ } }, "node_modules/mdast-util-to-hast": { - "version": "13.1.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.1.0.tgz", - "integrity": "sha512-/e2l/6+OdGp/FB+ctrJ9Avz71AN/GRH3oi/3KAx/kMnoUsD6q0woXlDT8lLEeViVKE7oZxE7RXzvO3T8kF2/sA==", + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", @@ -10510,15 +10382,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/minipass": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", - "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==", - "dev": true, - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, "node_modules/mrmime": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz", @@ -10544,17 +10407,6 @@ "multicast-dns": "cli.js" } }, - "node_modules/mz": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", - "dev": true, - "dependencies": { - "any-promise": "^1.0.0", - "object-assign": "^4.0.1", - "thenify-all": "^1.0.0" - } - }, "node_modules/nano-css": { "version": "5.6.1", "resolved": 
"https://registry.npmjs.org/nano-css/-/nano-css-5.6.1.tgz", @@ -10656,17 +10508,6 @@ "node": ">=0.10.0" } }, - "node_modules/normalize-url": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", - "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/npm-run-path": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", @@ -10702,15 +10543,6 @@ "node": ">=0.10.0" } }, - "node_modules/object-hash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", - "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, "node_modules/object-inspect": { "version": "1.13.1", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", @@ -11041,31 +10873,6 @@ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, - "node_modules/path-scurry": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.2.tgz", - "integrity": "sha512-7xTavNy5RQXnsjANvVvMkEjvloOinkAjv/Z6Ildz9v2RinZ4SBKTWFOVRbaF8p0vpHnyjV/UwNDdKuUv6M5qcA==", - "dev": true, - "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/path-scurry/node_modules/lru-cache": { - "version": "10.2.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.1.tgz", - "integrity": 
"sha512-tS24spDe/zXhWbNPErCHs/AGOzbKGHT+ybSBqmdLm8WZ1xXLWvH8Qn71QPAlqVhd0qUTWjy+Kl9JmISgDdEjsA==", - "dev": true, - "engines": { - "node": "14 || >=16.14" - } - }, "node_modules/path-to-regexp": { "version": "1.8.0", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", @@ -11108,24 +10915,6 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, - "node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pirates": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", - "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, "node_modules/pkg-dir": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", @@ -11235,200 +11024,108 @@ } }, "node_modules/postcss-calc": { - "version": "8.2.4", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz", - "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-9.0.1.tgz", + "integrity": "sha512-TipgjGyzP5QzEhsOZUaIkeO5mKeMFpebWzRogWG/ysonUlnHcq5aJe0jOjpfzUU8PeSaBQnrE8ehR0QA5vs8PQ==", "dependencies": { - "postcss-selector-parser": "^6.0.9", + "postcss-selector-parser": "^6.0.11", "postcss-value-parser": "^4.2.0" }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, "peerDependencies": { "postcss": "^8.2.2" } }, "node_modules/postcss-colormin": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz", - "integrity": 
"sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-6.1.0.tgz", + "integrity": "sha512-x9yX7DOxeMAR+BgGVnNSAxmAj98NX/YxEMNFP+SDCEeNLb2r3i6Hh1ksMsnW8Ub5SLCpbescQqn9YEbE9554Sw==", "dependencies": { - "browserslist": "^4.21.4", + "browserslist": "^4.23.0", "caniuse-api": "^3.0.0", - "colord": "^2.9.1", + "colord": "^2.9.3", "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-convert-values": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", - "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-6.1.0.tgz", + "integrity": "sha512-zx8IwP/ts9WvUM6NkVSkiU902QZL1bwPhaVaLynPtCsOTqp+ZKbNi+s6XJg3rfqpKGA/oc7Oxk5t8pOQJcwl/w==", "dependencies": { - "browserslist": "^4.21.4", + "browserslist": "^4.23.0", "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-discard-comments": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz", - "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-6.0.2.tgz", + "integrity": "sha512-65w/uIqhSBBfQmYnG92FO1mWZjJ4GL5b8atm5Yw2UgrwD7HiNiSSNwJor1eCFGzUgYnN/iIknhNRVqjrrpuglw==", "engines": { - "node": "^10 
|| ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-discard-duplicates": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", - "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-6.0.3.tgz", + "integrity": "sha512-+JA0DCvc5XvFAxwx6f/e68gQu/7Z9ud584VLmcgto28eB8FqSFZwtrLwB5Kcp70eIoWP/HXqz4wpo8rD8gpsTw==", "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-discard-empty": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz", - "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-6.0.3.tgz", + "integrity": "sha512-znyno9cHKQsK6PtxL5D19Fj9uwSzC2mB74cpT66fhgOadEUPyXFkbgwm5tvc3bt3NAy8ltE5MrghxovZRVnOjQ==", "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-discard-overridden": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", - "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-6.0.2.tgz", + "integrity": 
"sha512-j87xzI4LUggC5zND7KdjsI25APtyMuynXZSujByMaav2roV6OZX+8AaCUcZSWqckZpjAjRyFDdpqybgjFO0HJQ==", "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-discard-unused": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz", - "integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==", - "dependencies": { - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-import": { - "version": "15.1.0", - "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", - "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", - "dev": true, - "dependencies": { - "postcss-value-parser": "^4.0.0", - "read-cache": "^1.0.0", - "resolve": "^1.1.7" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "postcss": "^8.0.0" - } - }, - "node_modules/postcss-js": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", - "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", - "dev": true, + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-6.0.5.tgz", + "integrity": "sha512-wHalBlRHkaNnNwfC8z+ppX57VhvS+HWgjW508esjdaEYr3Mx7Gnn2xA4R/CKf5+Z9S5qsqC+Uzh4ueENWwCVUA==", "dependencies": { - "camelcase-css": "^2.0.1" + "postcss-selector-parser": "^6.0.16" }, "engines": { - "node": "^12 || ^14 || >= 16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.4.21" - } - }, 
- "node_modules/postcss-load-config": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz", - "integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "dependencies": { - "lilconfig": "^3.0.0", - "yaml": "^2.3.4" - }, - "engines": { - "node": ">= 14" - }, - "peerDependencies": { - "postcss": ">=8.0.9", - "ts-node": ">=9.0.0" - }, - "peerDependenciesMeta": { - "postcss": { - "optional": true - }, - "ts-node": { - "optional": true - } - } - }, - "node_modules/postcss-load-config/node_modules/lilconfig": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.1.tgz", - "integrity": "sha512-O18pf7nyvHTckunPWCV1XUNXU1piu01y2b7ATJ0ppkUkk8ocqVWBrYjJBCwHDjD/ZWcfyrA0P4gKhzWGi5EINQ==", - "dev": true, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/antonk52" - } - }, - "node_modules/postcss-load-config/node_modules/yaml": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.4.1.tgz", - "integrity": "sha512-pIXzoImaqmfOrL7teGUBt/T7ZDnyeGBWyXQBvOVhLkWLN37GXv8NMLK406UY6dS51JfcQHsmcW5cJ441bHg6Lg==", - "dev": true, - "bin": { - "yaml": "bin.mjs" - }, - "engines": { - "node": ">= 14" + "postcss": "^8.4.31" } }, "node_modules/postcss-loader": { @@ -11452,136 +11149,111 @@ "webpack": "^5.0.0" } }, - "node_modules/postcss-loader/node_modules/cosmiconfig": { - "version": "8.3.6", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", - "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", - "dependencies": { - "import-fresh": "^3.3.0", - "js-yaml": "^4.1.0", - "parse-json": "^5.2.0", - 
"path-type": "^4.0.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/d-fischer" - }, - "peerDependencies": { - "typescript": ">=4.9.5" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, "node_modules/postcss-merge-idents": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz", - "integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-6.0.3.tgz", + "integrity": "sha512-1oIoAsODUs6IHQZkLQGO15uGEbK3EAl5wi9SS8hs45VgsxQfMnxvt+L+zIr7ifZFIH14cfAeVe2uCTa+SPRa3g==", "dependencies": { - "cssnano-utils": "^3.1.0", + "cssnano-utils": "^4.0.2", "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-merge-longhand": { - "version": "5.1.7", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", - "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-6.0.5.tgz", + "integrity": "sha512-5LOiordeTfi64QhICp07nzzuTDjNSO8g5Ksdibt44d+uvIIAE1oZdRn8y/W5ZtYgRH/lnLDlvi9F8btZcVzu3w==", "dependencies": { "postcss-value-parser": "^4.2.0", - "stylehacks": "^5.1.1" + "stylehacks": "^6.1.1" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-merge-rules": { - "version": "5.1.4", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", - "integrity": 
"sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-6.1.1.tgz", + "integrity": "sha512-KOdWF0gju31AQPZiD+2Ar9Qjowz1LTChSjFFbS+e2sFgc4uHOp3ZvVX4sNeTlk0w2O31ecFGgrFzhO0RSWbWwQ==", "dependencies": { - "browserslist": "^4.21.4", + "browserslist": "^4.23.0", "caniuse-api": "^3.0.0", - "cssnano-utils": "^3.1.0", - "postcss-selector-parser": "^6.0.5" + "cssnano-utils": "^4.0.2", + "postcss-selector-parser": "^6.0.16" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-minify-font-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", - "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-6.1.0.tgz", + "integrity": "sha512-gklfI/n+9rTh8nYaSJXlCo3nOKqMNkxuGpTn/Qm0gstL3ywTr9/WRKznE+oy6fvfolH6dF+QM4nCo8yPLdvGJg==", "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-minify-gradients": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz", - "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-6.0.3.tgz", + "integrity": "sha512-4KXAHrYlzF0Rr7uc4VrfwDJ2ajrtNEpNEuLxFgwkhFZ56/7gaE4Nr49nLsQDZyUe+ds+kEhf+YAUolJiYXF8+Q==", 
"dependencies": { - "colord": "^2.9.1", - "cssnano-utils": "^3.1.0", + "colord": "^2.9.3", + "cssnano-utils": "^4.0.2", "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-minify-params": { - "version": "5.1.4", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", - "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-6.1.0.tgz", + "integrity": "sha512-bmSKnDtyyE8ujHQK0RQJDIKhQ20Jq1LYiez54WiaOoBtcSuflfK3Nm596LvbtlFcpipMjgClQGyGr7GAs+H1uA==", "dependencies": { - "browserslist": "^4.21.4", - "cssnano-utils": "^3.1.0", + "browserslist": "^4.23.0", + "cssnano-utils": "^4.0.2", "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-minify-selectors": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz", - "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==", + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-6.0.4.tgz", + "integrity": "sha512-L8dZSwNLgK7pjTto9PzWRoMbnLq5vsZSTu8+j1P/2GB8qdtGQfn+K1uSvFgYvgh83cbyxT5m43ZZhUMTJDSClQ==", "dependencies": { - "postcss-selector-parser": "^6.0.5" + "postcss-selector-parser": "^6.0.16" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-modules-extract-imports": { @@ -11639,206 
+11311,186 @@ "postcss": "^8.1.0" } }, - "node_modules/postcss-nested": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz", - "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.11" - }, - "engines": { - "node": ">=12.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.2.14" - } - }, "node_modules/postcss-normalize-charset": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", - "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-6.0.2.tgz", + "integrity": "sha512-a8N9czmdnrjPHa3DeFlwqst5eaL5W8jYu3EBbTTkI5FHkfMhFZh1EGbku6jhHhIzTA6tquI2P42NtZ59M/H/kQ==", "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-normalize-display-values": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", - "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-6.0.2.tgz", + "integrity": "sha512-8H04Mxsb82ON/aAkPeq8kcBbAtI5Q2a64X/mnRRfPXBq7XeogoQvReqxEfc0B4WPq1KimjezNC8flUtC3Qz6jg==", "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" 
+ "postcss": "^8.4.31" } }, "node_modules/postcss-normalize-positions": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz", - "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-6.0.2.tgz", + "integrity": "sha512-/JFzI441OAB9O7VnLA+RtSNZvQ0NCFZDOtp6QPFo1iIyawyXg0YI3CYM9HBy1WvwCRHnPep/BvI1+dGPKoXx/Q==", "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-normalize-repeat-style": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz", - "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-6.0.2.tgz", + "integrity": "sha512-YdCgsfHkJ2jEXwR4RR3Tm/iOxSfdRt7jplS6XRh9Js9PyCR/aka/FCb6TuHT2U8gQubbm/mPmF6L7FY9d79VwQ==", "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-normalize-string": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", - "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-6.0.2.tgz", + "integrity": 
"sha512-vQZIivlxlfqqMp4L9PZsFE4YUkWniziKjQWUtsxUiVsSSPelQydwS8Wwcuw0+83ZjPWNTl02oxlIvXsmmG+CiQ==", "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-normalize-timing-functions": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", - "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-6.0.2.tgz", + "integrity": "sha512-a+YrtMox4TBtId/AEwbA03VcJgtyW4dGBizPl7e88cTFULYsprgHWTbfyjSLyHeBcK/Q9JhXkt2ZXiwaVHoMzA==", "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-normalize-unicode": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", - "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-6.1.0.tgz", + "integrity": "sha512-QVC5TQHsVj33otj8/JD869Ndr5Xcc/+fwRh4HAsFsAeygQQXm+0PySrKbr/8tkDKzW+EVT3QkqZMfFrGiossDg==", "dependencies": { - "browserslist": "^4.21.4", + "browserslist": "^4.23.0", "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-normalize-url": { - "version": "5.1.0", - "resolved": 
"https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", - "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-6.0.2.tgz", + "integrity": "sha512-kVNcWhCeKAzZ8B4pv/DnrU1wNh458zBNp8dh4y5hhxih5RZQ12QWMuQrDgPRw3LRl8mN9vOVfHl7uhvHYMoXsQ==", "dependencies": { - "normalize-url": "^6.0.1", "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-normalize-whitespace": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz", - "integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-6.0.2.tgz", + "integrity": "sha512-sXZ2Nj1icbJOKmdjXVT9pnyHQKiSAyuNQHSgRCUgThn2388Y9cGVDR+E9J9iAYbSbLHI+UUwLVl1Wzco/zgv0Q==", "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-ordered-values": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz", - "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-6.0.2.tgz", + "integrity": "sha512-VRZSOB+JU32RsEAQrO94QPkClGPKJEL/Z9PCBImXMhIeK5KAYo6slP/hBYlLgrCjFxyqvn5VC81tycFEDBLG1Q==", "dependencies": { - "cssnano-utils": "^3.1.0", + 
"cssnano-utils": "^4.0.2", "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-reduce-idents": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz", - "integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-6.0.3.tgz", + "integrity": "sha512-G3yCqZDpsNPoQgbDUy3T0E6hqOQ5xigUtBQyrmq3tn2GxlyiL0yyl7H+T8ulQR6kOcHJ9t7/9H4/R2tv8tJbMA==", "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-reduce-initial": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", - "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-6.1.0.tgz", + "integrity": "sha512-RarLgBK/CrL1qZags04oKbVbrrVK2wcxhvta3GCxrZO4zveibqbRPmm2VI8sSgCXwoUHEliRSbOfpR0b/VIoiw==", "dependencies": { - "browserslist": "^4.21.4", + "browserslist": "^4.23.0", "caniuse-api": "^3.0.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-reduce-transforms": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", - "integrity": 
"sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-6.0.2.tgz", + "integrity": "sha512-sB+Ya++3Xj1WaT9+5LOOdirAxP7dJZms3GRcYheSPi1PiTMigsxHAdkrbItHxwYHr4kt1zL7mmcHstgMYT+aiA==", "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-selector-parser": { @@ -11854,46 +11506,46 @@ } }, "node_modules/postcss-sort-media-queries": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", - "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz", + "integrity": "sha512-AZ5fDMLD8SldlAYlvi8NIqo0+Z8xnXU2ia0jxmuhxAU+Lqt9K+AlmLNJ/zWEnE9x+Zx3qL3+1K20ATgNOr3fAA==", "dependencies": { - "sort-css-media-queries": "2.1.0" + "sort-css-media-queries": "2.2.0" }, "engines": { - "node": ">=10.0.0" + "node": ">=14.0.0" }, "peerDependencies": { - "postcss": "^8.4.16" + "postcss": "^8.4.23" } }, "node_modules/postcss-svgo": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz", - "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-6.0.3.tgz", + "integrity": "sha512-dlrahRmxP22bX6iKEjOM+c8/1p+81asjKT+V5lrgOH944ryx/OHpclnIbGsKVd3uWOXFLYJwCVf0eEkJGvO96g==", "dependencies": { "postcss-value-parser": "^4.2.0", - "svgo": "^2.7.0" + "svgo": "^3.2.0" }, "engines": { - "node": "^10 || ^12 || >=14.0" + 
"node": "^14 || ^16 || >= 18" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-unique-selectors": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz", - "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==", + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-6.0.4.tgz", + "integrity": "sha512-K38OCaIrO8+PzpArzkLKB42dSARtC2tmG6PvD4b1o1Q2E9Os8jzfWFfSy/rixsHwohtsDdFtAWGjFVFUdwYaMg==", "dependencies": { - "postcss-selector-parser": "^6.0.5" + "postcss-selector-parser": "^6.0.16" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/postcss-value-parser": { @@ -11902,14 +11554,14 @@ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" }, "node_modules/postcss-zindex": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz", - "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-6.0.2.tgz", + "integrity": "sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg==", "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/pretty-error": { @@ -12142,9 +11794,9 @@ } }, "node_modules/react": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", - "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "version": 
"18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", "dependencies": { "loose-envify": "^1.1.0" }, @@ -12271,15 +11923,15 @@ } }, "node_modules/react-dom": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", - "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", "dependencies": { "loose-envify": "^1.1.0", - "scheduler": "^0.23.0" + "scheduler": "^0.23.2" }, "peerDependencies": { - "react": "^18.2.0" + "react": "^18.3.1" } }, "node_modules/react-error-overlay": { @@ -12322,9 +11974,9 @@ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" }, "node_modules/react-json-view-lite": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.3.0.tgz", - "integrity": "sha512-aN1biKC5v4DQkmQBlZjuMFR09MKZGMPtIg+cut8zEeg2HXd6gl2gRy0n4HMacHf0dznQgo0SVXN7eT8zV3hEuQ==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.4.0.tgz", + "integrity": "sha512-wh6F6uJyYAmQ4fK0e8dSQMEWuvTs2Wr3el3sLD9bambX1+pSWUVXIz1RFaoy3TI1mZ0FqdpKq9YgbgTTgyrmXA==", "engines": { "node": ">=14" }, @@ -12334,12 +11986,11 @@ }, "node_modules/react-loadable": { "name": "@docusaurus/react-loadable", - "version": "5.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", - "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "version": "6.0.0", + "resolved": 
"https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", + "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", "dependencies": { - "@types/react": "*", - "prop-types": "^15.6.2" + "@types/react": "*" }, "peerDependencies": { "react": "*" @@ -12442,15 +12093,6 @@ "react-dom": "*" } }, - "node_modules/read-cache": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", - "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", - "dev": true, - "dependencies": { - "pify": "^2.3.0" - } - }, "node_modules/readable-stream": { "version": "3.6.2", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", @@ -13008,14 +12650,14 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "node_modules/sax": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.3.0.tgz", - "integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA==" + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", + "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==" }, "node_modules/scheduler": { - "version": "0.23.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", - "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", "dependencies": { "loose-envify": "^1.1.0" } @@ -13050,9 +12692,9 @@ } }, "node_modules/search-insights": { - "version": "2.13.0", - "resolved": 
"https://registry.npmjs.org/search-insights/-/search-insights-2.13.0.tgz", - "integrity": "sha512-Orrsjf9trHHxFRuo9/rzm0KIWmgzE8RMlZMzuhZOJ01Rnz3D0YBAe+V6473t6/H6c7irs6Lt48brULAiRWb3Vw==", + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.14.0.tgz", + "integrity": "sha512-OLN6MsPMCghDOqlCtsIsYgtsC0pnwVTyT9Mu6A3ewOj1DxvzZF6COrn2g86E/c05xbktB0XN04m/t1Z+n+fTGw==", "peer": true }, "node_modules/section-matter": { @@ -13418,9 +13060,9 @@ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" }, "node_modules/sitemap": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.1.tgz", - "integrity": "sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==", + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.2.tgz", + "integrity": "sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==", "dependencies": { "@types/node": "^17.0.5", "@types/sax": "^1.2.1", @@ -13459,6 +13101,15 @@ "node": ">=8" } }, + "node_modules/snake-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", + "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, "node_modules/sockjs": { "version": "0.3.24", "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", @@ -13470,9 +13121,9 @@ } }, "node_modules/sort-css-media-queries": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz", - "integrity": "sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==", + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz", + "integrity": "sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA==", "engines": { "node": ">= 6.3.0" } @@ -13563,12 +13214,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/stable": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", - "deprecated": "Modern JS already guarantees Array#sort() is a stable sort, so this library is deprecated. See the compatibility table on MDN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/sort#browser_compatibility" - }, "node_modules/stack-generator": { "version": "2.0.10", "resolved": "https://registry.npmjs.org/stack-generator/-/stack-generator-2.0.10.tgz", @@ -13646,27 +13291,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/string-width-cjs": { - "name": "string-width", - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, "node_modules/string-width/node_modules/ansi-regex": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", @@ -13729,19 +13353,6 @@ "node": ">=8" } }, - 
"node_modules/strip-ansi-cjs": { - "name": "strip-ansi", - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/strip-bom-string": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", @@ -13778,18 +13389,18 @@ } }, "node_modules/stylehacks": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", - "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-6.1.1.tgz", + "integrity": "sha512-gSTTEQ670cJNoaeIp9KX6lZmm8LJ3jPB5yJmX8Zq/wQxOsAFXV3qjWzHas3YYk1qesuVIyYWWUpZ0vSE/dTSGg==", "dependencies": { - "browserslist": "^4.21.4", - "postcss-selector-parser": "^6.0.4" + "browserslist": "^4.23.0", + "postcss-selector-parser": "^6.0.16" }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": "^14 || ^16 || >=18.0" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.4.31" } }, "node_modules/stylis": { @@ -13797,83 +13408,6 @@ "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.2.tgz", "integrity": "sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg==" }, - "node_modules/sucrase": { - "version": "3.35.0", - "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", - "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", - "dev": true, - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.2", - "commander": "^4.0.0", - "glob": "^10.3.10", - "lines-and-columns": "^1.1.6", - "mz": "^2.7.0", - "pirates": "^4.0.1", - 
"ts-interface-checker": "^0.1.9" - }, - "bin": { - "sucrase": "bin/sucrase", - "sucrase-node": "bin/sucrase-node" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/sucrase/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/sucrase/node_modules/commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/sucrase/node_modules/glob": { - "version": "10.3.12", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.12.tgz", - "integrity": "sha512-TCNv8vJ+xz4QiqTpfOJA7HvYv+tNIRHKfUWw/q+v2jdgN4ebz+KY9tGx5J4rHP0o84mNP+ApH66HRX8us3Khqg==", - "dev": true, - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^2.3.6", - "minimatch": "^9.0.1", - "minipass": "^7.0.4", - "path-scurry": "^1.10.2" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/sucrase/node_modules/minimatch": { - "version": "9.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.4.tgz", - "integrity": "sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==", - "dev": true, - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/supports-color": { "version": "7.2.0", "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -13902,23 +13436,27 @@ "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" }, "node_modules/svgo": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", - "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.3.2.tgz", + "integrity": "sha512-OoohrmuUlBs8B8o6MB2Aevn+pRIH9zDALSR+6hhqVfa6fRwG/Qw9VUMSMW9VNg2CFc/MTIfabtdOVl9ODIJjpw==", "dependencies": { "@trysound/sax": "0.2.0", "commander": "^7.2.0", - "css-select": "^4.1.3", - "css-tree": "^1.1.3", - "csso": "^4.2.0", - "picocolors": "^1.0.0", - "stable": "^0.1.8" + "css-select": "^5.1.0", + "css-tree": "^2.3.1", + "css-what": "^6.1.0", + "csso": "^5.0.5", + "picocolors": "^1.0.0" }, "bin": { "svgo": "bin/svgo" }, "engines": { - "node": ">=10.13.0" + "node": ">=14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/svgo" } }, "node_modules/svgo/node_modules/commander": { @@ -13929,117 +13467,22 @@ "node": ">= 10" } }, - "node_modules/svgo/node_modules/css-select": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", - "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.0.1", - "domhandler": "^4.3.1", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/svgo/node_modules/dom-serializer": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", - "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", - "dependencies": { - 
"domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/svgo/node_modules/domhandler": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", - "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/svgo/node_modules/domutils": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", - "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/svgo/node_modules/entities": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", - "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/tailwindcss": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.3.tgz", - "integrity": "sha512-U7sxQk/n397Bmx4JHbJx/iSOOv5G+II3f1kpLpY2QeUv5DcPdcTsYLlusZfq1NthHS1c1cZoyFmmkex1rzke0A==", - "dev": true, + "node_modules/svgo/node_modules/css-tree": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", "dependencies": { - "@alloc/quick-lru": "^5.2.0", - "arg": "^5.0.2", - "chokidar": "^3.5.3", - 
"didyoumean": "^1.2.2", - "dlv": "^1.1.3", - "fast-glob": "^3.3.0", - "glob-parent": "^6.0.2", - "is-glob": "^4.0.3", - "jiti": "^1.21.0", - "lilconfig": "^2.1.0", - "micromatch": "^4.0.5", - "normalize-path": "^3.0.0", - "object-hash": "^3.0.0", - "picocolors": "^1.0.0", - "postcss": "^8.4.23", - "postcss-import": "^15.1.0", - "postcss-js": "^4.0.1", - "postcss-load-config": "^4.0.1", - "postcss-nested": "^6.0.1", - "postcss-selector-parser": "^6.0.11", - "resolve": "^1.22.2", - "sucrase": "^3.32.0" - }, - "bin": { - "tailwind": "lib/cli.js", - "tailwindcss": "lib/cli.js" + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" }, "engines": { - "node": ">=14.0.0" + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" } }, - "node_modules/tailwindcss/node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dev": true, - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } + "node_modules/svgo/node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" }, "node_modules/tapable": { "version": "2.2.1", @@ -14181,27 +13624,6 @@ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" }, - "node_modules/thenify": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", - "dev": true, - "dependencies": { - "any-promise": "^1.0.0" - } - }, - "node_modules/thenify-all": { - "version": "1.6.0", - "resolved": 
"https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", - "dev": true, - "dependencies": { - "thenify": ">= 3.1.0 < 4" - }, - "engines": { - "node": ">=0.8" - } - }, "node_modules/throttle-debounce": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/throttle-debounce/-/throttle-debounce-3.0.1.tgz", @@ -14288,12 +13710,6 @@ "resolved": "https://registry.npmjs.org/ts-easing/-/ts-easing-0.2.0.tgz", "integrity": "sha512-Z86EW+fFFh/IFB1fqQ3/+7Zpf9t2ebOAxNI/V6Wo7r5gqiqtxmgTlQ1qbqQcjLKYeSHPTsEmvlJUDg/EuL0uHQ==" }, - "node_modules/ts-interface-checker": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", - "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", - "dev": true - }, "node_modules/tslib": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", @@ -14350,9 +13766,9 @@ } }, "node_modules/typescript": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", - "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", + "version": "5.5.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.3.tgz", + "integrity": "sha512-/hreyEujaB0w76zKo6717l3L0o/qEUtRgdvUBvlkhoWeOVMjMuHNHk0BRBzikzuGDqNmPQbg5ifMEqsHLiIUcQ==", "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -15245,44 +14661,6 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/wrap-ansi-cjs": { - "name": "wrap-ansi", - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "dependencies": { - "ansi-styles": 
"^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "node_modules/wrap-ansi-cjs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/wrap-ansi/node_modules/ansi-regex": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", diff --git a/docs/package.json b/docs/package.json index 095045f5f3f5..699c0381f56b 100644 --- a/docs/package.json +++ b/docs/package.json @@ -15,24 +15,22 @@ "typecheck": "tsc" }, "dependencies": { - "@docusaurus/core": "3.2.1", - "@docusaurus/preset-classic": "3.2.1", + "@docusaurus/core": "^3.4.0", + "@docusaurus/plugin-content-pages": "^3.4.0", + "@docusaurus/preset-classic": "^3.4.0", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "prism-react-renderer": "^2.3.0", - "react": "^18.0.0", - "react-dom": "^18.0.0", + "react": "^18.3.1", + "react-dom": "^18.3.1", "react-icons": "^5.2.1", "react-use": "^17.5.0" }, "devDependencies": { - "@docusaurus/module-type-aliases": "3.2.1", - "@docusaurus/tsconfig": "3.2.1", - "@docusaurus/types": "3.2.1", - "autoprefixer": "^10.4.19", - "postcss": "^8.4.38", - "tailwindcss": "^3.4.3", - "typescript": "~5.2.2" + "@docusaurus/module-type-aliases": "^3.4.0", + 
"@docusaurus/tsconfig": "^3.4.0", + "@docusaurus/types": "^3.4.0", + "typescript": "~5.5.3" }, "browserslist": { "production": [ diff --git a/docs/plugins/tailwind-config.cjs b/docs/plugins/tailwind-config.cjs new file mode 100644 index 000000000000..182c0d6e6bdb --- /dev/null +++ b/docs/plugins/tailwind-config.cjs @@ -0,0 +1,13 @@ +export default function tailwindPlugin(context, options) { + return { + name: 'tailwind-plugin', + configurePostCss(postcssOptions) { + postcssOptions.plugins = [ + require('postcss-import'), + require('tailwindcss'), + require('autoprefixer'), + ]; + return postcssOptions; + }, + }; +} diff --git a/docs/postcss.config.js b/docs/postcss.config.js deleted file mode 100644 index 12a703d900da..000000000000 --- a/docs/postcss.config.js +++ /dev/null @@ -1,6 +0,0 @@ -module.exports = { - plugins: { - tailwindcss: {}, - autoprefixer: {}, - }, -}; diff --git a/docs/src/components/CustomFooter.tsx b/docs/src/components/CustomFooter.tsx index c6c781f1236b..36e12ac94aac 100644 --- a/docs/src/components/CustomFooter.tsx +++ b/docs/src/components/CustomFooter.tsx @@ -1,23 +1,42 @@ +import React from "react"; import { FaSlack, FaDiscord, FaGithub } from "react-icons/fa"; +import Translate from '@docusaurus/Translate'; +import "../css/footer.css"; function CustomFooter() { return ( -