From 5925cd97251a8f55b7d42169279089d56925f54f Mon Sep 17 00:00:00 2001
From: Volker Stampa
Date: Mon, 30 Oct 2023 13:05:01 +0100
Subject: [PATCH] Fix sphinx warnings

---
 docs/conf.py                                   | 5 +++++
 src/intelligence_layer/core/complete.py        | 2 +-
 src/intelligence_layer/core/logger.py          | 9 +++++++--
 src/intelligence_layer/core/prompt_template.py | 2 +-
 src/intelligence_layer/core/task.py            | 5 +++--
 src/intelligence_layer/core/text_highlight.py  | 3 ++-
 6 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index a7f26a00e..2b7e35438 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -15,6 +15,11 @@
 
 extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx.ext.viewcode"]
 
+autodoc_default_options = {
+    "private-members": None,
+    "exclude-members": "model_config,model_fields",
+}
+
 templates_path = ["_templates"]
 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
 
diff --git a/src/intelligence_layer/core/complete.py b/src/intelligence_layer/core/complete.py
index f3c27bd6b..4070f2098 100644
--- a/src/intelligence_layer/core/complete.py
+++ b/src/intelligence_layer/core/complete.py
@@ -88,7 +88,7 @@ class InstructOutput(BaseModel):
     Attributes:
         response: The generated response to the instruction.
         prompt_with_metadata: To handle the instruction, a `PromptTemplate` is used.
-            The template defines two `PromptRange`s:
+            The template defines two `PromptRange`\ s:
             - "instruction": covering the instruction text as provided in the `InstructionInput`.
             - "input": covering the input text as provided in the `InstructionInput`.
             These can for example be used for downstream `TextHighlight` tasks.
diff --git a/src/intelligence_layer/core/logger.py b/src/intelligence_layer/core/logger.py
index 736eb8a89..12ff2ab61 100644
--- a/src/intelligence_layer/core/logger.py
+++ b/src/intelligence_layer/core/logger.py
@@ -52,7 +52,7 @@
 
 @runtime_checkable
 class DebugLogger(Protocol):
-    """A protocol for instrumenting `Task`s with structured logging.
+    """A protocol for instrumenting `Task`\ s with structured logging.
 
     A logger needs to provide a way to collect an individual log, which should be serializable, and
     a way to generate nested loggers, so that sub-tasks can emit logs that are grouped together.
@@ -393,6 +393,11 @@ def _rich_render_(self) -> Tree:
         return tree
 
 
+# Required for sphinx, see also: https://docs.pydantic.dev/2.4/errors/usage_errors/#class-not-fully-defined
+InMemorySpan.model_rebuild()
+InMemoryDebugLogger.model_rebuild()
+
+
 class StartTask(BaseModel):
     """Represents the payload/entry of a log-line indicating that a `TaskSpan` was opened through `DebugLogger.task_span`.
 
@@ -498,7 +503,7 @@ class FileDebugLogger(DebugLogger):
         log_file_path: Denotes the file to log to.
 
     Attributes:
-        uuid: a uuid for the logger. If multiple `FileDebugLogger`s log to the same file
+        uuid: a uuid for the logger. If multiple `FileDebugLogger`\ s log to the same file
            the child-elements for a logger can be identified by referring to this id as parent.
     """
 
diff --git a/src/intelligence_layer/core/prompt_template.py b/src/intelligence_layer/core/prompt_template.py
index 5ab5ff0fd..295ad7360 100644
--- a/src/intelligence_layer/core/prompt_template.py
+++ b/src/intelligence_layer/core/prompt_template.py
@@ -81,7 +81,7 @@ class PromptWithMetadata:
 
     Args:
         prompt: The actual `Prompt`.
-        ranges: A mapping of range name to a `Sequence` of corresponding `PromptRange`s.
+        ranges: A mapping of range name to a `Sequence` of corresponding `PromptRange`\ s.
""" prompt: Prompt diff --git a/src/intelligence_layer/core/task.py b/src/intelligence_layer/core/task.py index b0b015e50..2f9fccaad 100644 --- a/src/intelligence_layer/core/task.py +++ b/src/intelligence_layer/core/task.py @@ -63,6 +63,7 @@ class Task(ABC, Generic[Input, Output]): Input: Interface to be passed to the task with all data needed to run the process. Ideally, these are specified in terms related to the use-case, rather than lower-level configuration options. + Output: Interface of the output returned by the task. """ @@ -111,8 +112,8 @@ def run_concurrently( this method call. This can be used to prevent queue-full or similar error of downstream APIs when the global concurrency limit is too high for a certain task. Returns: - The `Output`s generated by calling `run` for each given `Input`. The order of `Output`s - corresponds to the order of the `Input`s. + The `Output`\ s generated by calling `run` for each given `Input`. The order of `Output`\ s + corresponds to the order of the `Input`\ s. """ with debug_logger.span(f"Concurrent {type(self).__name__} tasks") as span: diff --git a/src/intelligence_layer/core/text_highlight.py b/src/intelligence_layer/core/text_highlight.py index 48c6a3fc1..54a82dc8e 100644 --- a/src/intelligence_layer/core/text_highlight.py +++ b/src/intelligence_layer/core/text_highlight.py @@ -75,9 +75,10 @@ class TextHighlight(Task[TextHighlightInput, TextHighlightOutput]): client: Aleph Alpha client instance for running model related API calls. Example: + >>> client = Client(os.getenv("AA_TOKEN")) >>> text_highlight = TextHighlight(client=client) - >>> prompt_template_str = "{% promptrange r1 %}Question: What is 2 + 2?{% endpromptrange %}\nAnswer:" + >>> prompt_template_str = "{% promptrange r1 %\Question: What is 2 + 2?{% endpromptrange %}\\nAnswer:" >>> template = PromptTemplate(prompt_template_str) >>> prompt_with_metadata = template.to_prompt_with_metadata() >>> completion = " 4."