diff --git a/README.md b/README.md
index 219de4e..f185ad4 100644
--- a/README.md
+++ b/README.md
@@ -100,21 +100,6 @@ Aside from the list methods there are `exec` and `exec_file` methods that allow
 responses. Those functions also provide a streaming version of execution if you
 want to process the output streams in your code as the tool is running.
 
-### `list_tools()`
-
-This function lists the available tools.
-
-```python
-from gptscript.gptscript import GPTScript
-
-
-async def list_tools():
-    gptscript = GPTScript()
-    tools = await gptscript.list_tools()
-    print(tools)
-    gptscript.close()
-```
-
 ### `list_models()`
 
 This function lists the available GPT models.
diff --git a/gptscript/gptscript.py b/gptscript/gptscript.py
index c76cff5..563bc64 100644
--- a/gptscript/gptscript.py
+++ b/gptscript/gptscript.py
@@ -148,9 +148,6 @@ async def _run_basic_command(self, sub_command: str, request_body: Any = None):
     async def version(self) -> str:
         return await self._run_basic_command("version")
 
-    async def list_tools(self) -> str:
-        return await self._run_basic_command("list-tools")
-
     async def list_models(self, providers: list[str] = None, credential_overrides: list[str] = None) -> list[str]:
         if self.opts.DefaultModelProvider != "":
             if providers is None:
diff --git a/tests/test_gptscript.py b/tests/test_gptscript.py
index 4f87288..acb2754 100644
--- a/tests/test_gptscript.py
+++ b/tests/test_gptscript.py
@@ -144,12 +144,6 @@ async def test_list_models_from_default_provider():
     g.close()
 
 
-@pytest.mark.asyncio
-async def test_list_tools(gptscript):
-    out = await gptscript.list_tools()
-    assert out is not None, "Expected some output from list_tools"
-
-
 @pytest.mark.asyncio
 async def test_abort_run(gptscript):
     async def abort_run(run: Run, e: CallFrame | RunFrame | PromptFrame):
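With `list_tools()` removed, the remaining discovery call in this SDK is `list_models()`. As a reference for reviewers, here is a minimal sketch of its use, mirroring the pattern of the removed `list_tools()` README example; the `GPTScript` constructor, `close()` method, and the `list_models(providers=..., credential_overrides=...) -> list[str]` signature all come from the diff above, while the `asyncio.run` entry point is added here just to make the snippet runnable on its own.

```python
import asyncio

from gptscript.gptscript import GPTScript


async def list_models():
    gptscript = GPTScript()
    # list_models() returns a list[str] of model names, per the
    # signature retained in gptscript/gptscript.py above. Both
    # providers and credential_overrides default to None.
    models = await gptscript.list_models()
    print(models)
    gptscript.close()


if __name__ == "__main__":
    asyncio.run(list_models())
```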