From 3cf835f89c01933b6e05eab94c1d28f4c6ffd469 Mon Sep 17 00:00:00 2001 From: valentinfrlch Date: Sun, 10 Nov 2024 22:16:41 +0100 Subject: [PATCH 1/9] WIP: Refactoring providers into classes --- custom_components/llmvision/__init__.py | 10 +- .../{request_handlers.py => providers.py} | 641 +++++++++--------- 2 files changed, 343 insertions(+), 308 deletions(-) rename custom_components/llmvision/{request_handlers.py => providers.py} (50%) diff --git a/custom_components/llmvision/__init__.py b/custom_components/llmvision/__init__.py index 3d94311..fbbd33c 100644 --- a/custom_components/llmvision/__init__.py +++ b/custom_components/llmvision/__init__.py @@ -37,7 +37,7 @@ from datetime import timedelta from homeassistant.util import dt as dt_util from homeassistant.config_entries import ConfigEntry -from .request_handlers import RequestHandler +from .providers import RequestHandler from .media_handlers import MediaProcessor from homeassistant.core import SupportsResponse from homeassistant.exceptions import ServiceValidationError @@ -265,7 +265,7 @@ async def image_analyzer(data_call): ) # Validate configuration, input data and make the call - response = await client.make_request(call) + response = await client.forward_request(call) await _remember(hass, call, start, response) return response @@ -287,7 +287,7 @@ async def video_analyzer(data_call): include_filename=call.include_filename, expose_images=call.expose_images ) - response = await client.make_request(call) + response = await client.forward_request(call) await _remember(hass, call, start, response) return response @@ -310,7 +310,7 @@ async def stream_analyzer(data_call): expose_images=call.expose_images ) - response = await client.make_request(call) + response = await client.forward_request(call) await _remember(hass, call, start, response) return response @@ -357,7 +357,7 @@ def is_number(s): target_width=call.target_width, include_filename=call.include_filename ) - response = await client.make_request(call) + 
response = await client.forward_request(call) _LOGGER.info(f"Response: {response}") # udpate sensor in data_call.data.get("sensor_entity") await _update_sensor(hass, sensor_entity, response["response_text"]) diff --git a/custom_components/llmvision/request_handlers.py b/custom_components/llmvision/providers.py similarity index 50% rename from custom_components/llmvision/request_handlers.py rename to custom_components/llmvision/providers.py index 94d9b39..f2772ad 100644 --- a/custom_components/llmvision/request_handlers.py +++ b/custom_components/llmvision/providers.py @@ -1,3 +1,4 @@ +from abc import ABC, abstractmethod from homeassistant.exceptions import ServiceValidationError from homeassistant.helpers.aiohttp_client import async_get_clientsession import logging @@ -88,8 +89,6 @@ def default_model(provider): return { class RequestHandler: - """class to handle requests to AI providers""" - def __init__(self, hass, message, max_tokens, temperature, detail): self.session = async_get_clientsession(hass) self.hass = hass @@ -100,8 +99,7 @@ def __init__(self, hass, message, max_tokens, temperature, detail): self.base64_images = [] self.filenames = [] - async def make_request(self, call): - """Forward request to providers""" + async def forward_request(self, call): entry_id = call.provider provider = get_provider(self.hass, entry_id) _LOGGER.info(f"Provider from call: {provider}") @@ -110,78 +108,69 @@ async def make_request(self, call): if provider == 'OpenAI': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_OPENAI_API_KEY) - self._validate_call(provider=provider, - api_key=api_key, - base64_images=self.base64_images) - response_text = await self.openai(model=model, api_key=api_key) + self._validate_call( + provider=provider, api_key=api_key, base64_images=self.base64_images) + request = OpenAI(self.session, model, self.max_tokens, self.temperature, + self.message, self.base64_images, self.filenames, self.detail) + response_text = await 
request.vision_request(api_key, ENDPOINT_OPENAI) elif provider == 'Anthropic': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_ANTHROPIC_API_KEY) - self._validate_call(provider=provider, - api_key=api_key, - base64_images=self.base64_images) - response_text = await self.anthropic(model=model, api_key=api_key) + self._validate_call( + provider=provider, api_key=api_key, base64_images=self.base64_images) + request = Anthropic(self.session, model, self.max_tokens, self.temperature, + self.message, self.base64_images, self.filenames, self.detail) + response_text = await request.vision_request(api_key, ENDPOINT_ANTHROPIC) elif provider == 'Google': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_GOOGLE_API_KEY) - self._validate_call(provider=provider, - api_key=api_key, - base64_images=self.base64_images) - response_text = await self.google(model=model, api_key=api_key) + self._validate_call( + provider=provider, api_key=api_key, base64_images=self.base64_images) + request = Google(self.session, model, self.max_tokens, self.temperature, + self.message, self.base64_images, self.filenames, self.detail) + response_text = await request.vision_request(api_key, ENDPOINT_GOOGLE) elif provider == 'Groq': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_GROQ_API_KEY) - self._validate_call(provider=provider, - api_key=api_key, - base64_images=self.base64_images) - response_text = await self.groq(model=model, api_key=api_key) + self._validate_call( + provider=provider, api_key=api_key, base64_images=self.base64_images) + request = Groq(self.session, model, self.max_tokens, self.temperature, + self.message, self.base64_images, self.filenames, self.detail) + response_text = await request.vision_request(api_key, ENDPOINT_GROQ) elif provider == 'LocalAI': - ip_address = self.hass.data.get( - DOMAIN).get( + ip_address = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_LOCALAI_IP_ADDRESS) - port = self.hass.data.get( - DOMAIN).get( + port = 
self.hass.data.get(DOMAIN).get( entry_id).get(CONF_LOCALAI_PORT) - https = self.hass.data.get( - DOMAIN).get( + https = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_LOCALAI_HTTPS, False) - self._validate_call(provider=provider, - api_key=None, - base64_images=self.base64_images, - ip_address=ip_address, - port=port) - response_text = await self.localai(model=model, - ip_address=ip_address, - port=port, - https=https) + self._validate_call(provider=provider, api_key=None, + base64_images=self.base64_images, ip_address=ip_address, port=port) + request = LocalAI(self.session, model, self.max_tokens, self.temperature, + self.message, self.base64_images, self.filenames, self.detail) + response_text = await request.vision_request(ip_address=ip_address, port=port, https=https) elif provider == 'Ollama': - ip_address = self.hass.data.get( - DOMAIN).get( + ip_address = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_OLLAMA_IP_ADDRESS) port = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_OLLAMA_PORT) https = self.hass.data.get(DOMAIN).get( - entry_id).get( - CONF_OLLAMA_HTTPS, False) - self._validate_call(provider=provider, - api_key=None, - base64_images=self.base64_images, - ip_address=ip_address, - port=port) - response_text = await self.ollama(model=model, - ip_address=ip_address, - port=port, - https=https) + entry_id).get(CONF_OLLAMA_HTTPS, False) + self._validate_call(provider=provider, api_key=None, + base64_images=self.base64_images, ip_address=ip_address, port=port) + request = Ollama(self.session, model, self.max_tokens, self.temperature, + self.message, self.base64_images, self.filenames, self.detail) + response_text = await request.vision_request(ip_address=ip_address, port=port, https=https) elif provider == 'Custom OpenAI': api_key = self.hass.data.get(DOMAIN).get( - entry_id).get( - CONF_CUSTOM_OPENAI_API_KEY, "") + entry_id).get(CONF_CUSTOM_OPENAI_API_KEY, "") endpoint = self.hass.data.get(DOMAIN).get(entry_id).get( CONF_CUSTOM_OPENAI_ENDPOINT) 
+ "/v1/chat/completions" - self._validate_call(provider=provider, - api_key=api_key, - base64_images=self.base64_images) - response_text = await self.openai(model=model, api_key=api_key, endpoint=endpoint) + self._validate_call( + provider=provider, api_key=api_key, base64_images=self.base64_images) + request = OpenAI(self.session, model, self.max_tokens, self.temperature, + self.message, self.base64_images, self.filenames, self.detail) + response_text = await request.vision_request(api_key, endpoint) else: raise ServiceValidationError("invalid_provider") return {"response_text": response_text} @@ -190,313 +179,359 @@ def add_frame(self, base64_image, filename): self.base64_images.append(base64_image) self.filenames.append(filename) - # Request Handlers - async def openai(self, model, api_key, endpoint=ENDPOINT_OPENAI): - # Set headers and payload - headers = {'Content-type': 'application/json', - 'Authorization': 'Bearer ' + api_key} - data = {"model": model, - "messages": [{"role": "user", "content": [ - ]}], - "max_tokens": self.max_tokens, - "temperature": self.temperature - } + def _validate_call(self, provider, api_key, base64_images, ip_address=None, port=None): + """Validate the service call data""" + # Checks for OpenAI + if provider == 'OpenAI': + if not api_key: + raise ServiceValidationError(ERROR_OPENAI_NOT_CONFIGURED) + # Checks for Anthropic + elif provider == 'Anthropic': + if not api_key: + raise ServiceValidationError(ERROR_ANTHROPIC_NOT_CONFIGURED) + elif provider == 'Google': + if not api_key: + raise ServiceValidationError(ERROR_GOOGLE_NOT_CONFIGURED) + # Checks for Groq + elif provider == 'Groq': + if not api_key: + raise ServiceValidationError(ERROR_GROQ_NOT_CONFIGURED) + if len(base64_images) > 1: + raise ServiceValidationError(ERROR_GROQ_MULTIPLE_IMAGES) + # Checks for LocalAI + elif provider == 'LocalAI': + if not ip_address or not port: + raise ServiceValidationError(ERROR_LOCALAI_NOT_CONFIGURED) + # Checks for Ollama + elif provider == 
'Ollama': + if not ip_address or not port: + raise ServiceValidationError(ERROR_OLLAMA_NOT_CONFIGURED) + elif provider == 'Custom OpenAI': + pass + else: + raise ServiceValidationError( + "Invalid provider selected. The event calendar cannot be used for analysis.") + # Check media input + if base64_images == []: + raise ServiceValidationError(ERROR_NO_IMAGE_INPUT) - # Add the images to the request - for image, filename in zip(self.base64_images, self.filenames): - tag = ("Image " + str(self.base64_images.index(image) + 1) - ) if filename == "" else filename - data["messages"][0]["content"].append( - {"type": "text", "text": tag + ":"}) - data["messages"][0]["content"].append( - {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image}", "detail": self.detail}}) + async def _resolve_error(self, response, provider): + """Translate response status to error message""" + import json + full_response_text = await response.text() + _LOGGER.info(f"[INFO] Full Response: {full_response_text}") - # append the message to the end of the request - data["messages"][0]["content"].append( - {"type": "text", "text": self.message} - ) + try: + response_json = json.loads(full_response_text) + if provider == 'anthropic': + error_info = response_json.get('error', {}) + error_message = f"{error_info.get('type', 'Unknown error')}: {error_info.get('message', 'Unknown error')}" + elif provider == 'ollama': + error_message = response_json.get('error', 'Unknown error') + else: + error_info = response_json.get('error', {}) + error_message = error_info.get('message', 'Unknown error') + except json.JSONDecodeError: + error_message = 'Unknown error' + + return error_message + + +class Provider(ABC): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + self.session = session + self.model = model + self.max_tokens = max_tokens + self.temperature = temperature + self.message = message + self.base64_images = base64_images + 
self.filenames = filenames + self.detail = detail + + @abstractmethod + async def vision_request(self, api_key, endpoint): + pass + + @abstractmethod + async def text_request(self, api_key, endpoint): + pass + + async def vision_request(self, api_key: str, endpoint: str, **kwargs) -> str: + data = self._prepare_vision_data() + return await self._make_request(api_key, endpoint, data, **kwargs) + + async def text_request(self, api_key: str, endpoint: str, **kwargs) -> str: + data = self._prepare_text_data() + return await self._make_request(api_key, endpoint, data, **kwargs) + + async def _post(self, url, headers, data): + """Post data to url and return response data""" + _LOGGER.info(f"Request data: {sanitize_data(data)}") + + try: + response = await self.session.post(url, headers=headers, json=data) + except Exception as e: + raise ServiceValidationError(f"Request failed: {e}") + + if response.status != 200: + frame = inspect.stack()[1] + provider = frame.frame.f_locals["self"].__class__.__name__.lower() + parsed_response = await self._resolve_error(response, provider) + raise ServiceValidationError(parsed_response) + else: + response_data = await response.json() + _LOGGER.info(f"Response data: {response_data}") + return response_data - response = await self._post( - url=endpoint, headers=headers, data=data) + async def _resolve_error(self, response, provider): + """Translate response status to error message""" + import json + full_response_text = await response.text() + _LOGGER.info(f"[INFO] Full Response: {full_response_text}") + try: + response_json = json.loads(full_response_text) + if provider == 'anthropic': + error_info = response_json.get('error', {}) + error_message = f"{error_info.get('type', 'Unknown error')}: {error_info.get('message', 'Unknown error')}" + elif provider == 'ollama': + error_message = response_json.get('error', 'Unknown error') + else: + error_info = response_json.get('error', {}) + error_message = error_info.get('message', 'Unknown 
error') + except json.JSONDecodeError: + error_message = 'Unknown error' + + return error_message + + async def _fetch(self, url, max_retries=2, retry_delay=1): + """Fetch image from url and return image data""" + retries = 0 + while retries < max_retries: + _LOGGER.info( + f"Fetching {url} (attempt {retries + 1}/{max_retries})") + try: + response = await self.session.get(url) + if response.status != 200: + _LOGGER.warning( + f"Couldn't fetch frame (status code: {response.status})") + retries += 1 + await asyncio.sleep(retry_delay) + continue + data = await response.read() + return data + except Exception as e: + _LOGGER.error(f"Fetch failed: {e}") + retries += 1 + await asyncio.sleep(retry_delay) + _LOGGER.warning(f"Failed to fetch {url} after {max_retries} retries") + return None + + +class OpenAI(Provider): + def _generate_headers(self, api_key: str) -> dict: + return {'Content-type': 'application/json', 'Authorization': 'Bearer ' + api_key} + + async def _make_request(self, api_key: str, endpoint: str, data: dict) -> str: + headers = self._generate_headers(api_key) + response = await self._post(url=endpoint, headers=headers, data=data) response_text = response.get( "choices")[0].get("message").get("content") return response_text - async def anthropic(self, model, api_key): - # Set headers and payload - headers = {'content-type': 'application/json', - 'x-api-key': api_key, - 'anthropic-version': VERSION_ANTHROPIC} - data = {"model": model, - "messages": [ - {"role": "user", "content": []} - ], - "max_tokens": self.max_tokens, - "temperature": self.temperature - } - - # Add the images to the request + def _prepare_vision_data(self) -> dict: + data = {"model": self.model, "messages": [{"role": "user", "content": [ + ]}], "max_tokens": self.max_tokens, "temperature": self.temperature} for image, filename in zip(self.base64_images, self.filenames): tag = ("Image " + str(self.base64_images.index(image) + 1) - ) if filename == "" or not filename else filename - 
data["messages"][0]["content"].append( - { - "type": "text", - "text": tag + ":" - }) + ) if filename == "" else filename data["messages"][0]["content"].append( - {"type": "image", "source": - {"type": "base64", - "media_type": "image/jpeg", - "data": f"{image}" - } - } - ) - - # append the message to the end of the request + {"type": "text", "text": tag + ":"}) + data["messages"][0]["content"].append({"type": "image_url", "image_url": { + "url": f"data:image/jpeg;base64,{image}", "detail": self.detail}}) data["messages"][0]["content"].append( - {"type": "text", "text": self.message} - ) + {"type": "text", "text": self.message}) + return data - response = await self._post( - url=ENDPOINT_ANTHROPIC, headers=headers, data=data) + def _prepare_text_data(self) -> dict: + return { + "model": self.model, + "messages": [{"role": "user", "content": [{"type": "text", "text": self.message}]}], + "max_tokens": self.max_tokens, + "temperature": self.temperature + } + +class Anthropic(Provider): + def _generate_headers(self, api_key: str) -> dict: + return { + 'content-type': 'application/json', + 'x-api-key': api_key, + 'anthropic-version': VERSION_ANTHROPIC + } + + async def _make_request(self, api_key: str, endpoint: str, data: dict) -> str: + headers = self._generate_headers(api_key) + response = await self._post(url=endpoint, headers=headers, data=data) response_text = response.get("content")[0].get("text") return response_text - async def google(self, model, api_key): - # Set headers and payload - headers = {'content-type': 'application/json'} - data = {"contents": [ - ], - "generationConfig": { - "maxOutputTokens": self.max_tokens, - "temperature": self.temperature - } + def _prepare_vision_data(self) -> dict: + data = { + "model": self.model, + "messages": [{"role": "user", "content": []}], + "max_tokens": self.max_tokens, + "temperature": self.temperature } - - # Add the images to the request for image, filename in zip(self.base64_images, self.filenames): tag = ("Image 
" + str(self.base64_images.index(image) + 1) - ) if filename == "" or not filename else filename - data["contents"].append( - { - "role": "user", - "parts": [ - { - "text": tag + ":" - }, - { - "inline_data": { - "mime_type": "image/jpeg", - "data": image - } - } - ] - } - ) + ) if filename == "" else filename + data["messages"][0]["content"].append( + {"type": "text", "text": tag + ":"}) + data["messages"][0]["content"].append({"type": "image", "source": { + "type": "base64", "media_type": "image/jpeg", "data": f"{image}"}}) + data["messages"][0]["content"].append( + {"type": "text", "text": self.message}) + return data - # append the message to the end of the request - data["contents"].append( - {"role": "user", - "parts": [{"text": self.message} - ] - } - ) + def _prepare_text_data(self) -> dict: + return { + "model": self.model, + "messages": [{"role": "user", "content": [{"type": "text", "text": self.message}]}], + "max_tokens": self.max_tokens, + "temperature": self.temperature + } - response = await self._post( - url=ENDPOINT_GOOGLE.format(model=model, api_key=api_key), headers=headers, data=data) +class Google(Provider): + def _generate_headers(self) -> dict: + return {'content-type': 'application/json'} + + async def _make_request(self, api_key: str, endpoint: str, data: dict) -> str: + headers = self._generate_headers() + response = await self._post(url=endpoint.format(model=self.model, api_key=api_key), headers=headers, data=data) response_text = response.get("candidates")[0].get( "content").get("parts")[0].get("text") return response_text - async def groq(self, model, api_key, endpoint=ENDPOINT_GROQ): + def _prepare_vision_data(self) -> dict: + data = {"contents": [], "generationConfig": { + "maxOutputTokens": self.max_tokens, "temperature": self.temperature}} + for image, filename in zip(self.base64_images, self.filenames): + tag = ("Image " + str(self.base64_images.index(image) + 1) + ) if filename == "" else filename + 
data["contents"].append({"role": "user", "parts": [ + {"text": tag + ":"}, {"inline_data": {"mime_type": "image/jpeg", "data": image}}]}) + data["contents"].append( + {"role": "user", "parts": [{"text": self.message}]}) + return data + + def _prepare_text_data(self) -> dict: + return { + "contents": [{"role": "user", "parts": [{"text": self.message + ":"}]}], + "generationConfig": {"maxOutputTokens": self.max_tokens, "temperature": self.temperature} + } + + +class Groq(Provider): + def _generate_headers(self, api_key: str) -> dict: + return {'Content-type': 'application/json', 'Authorization': 'Bearer ' + api_key} + + async def _make_request(self, api_key: str, endpoint: str, data: dict) -> str: + headers = self._generate_headers(api_key) + response = await self._post(url=endpoint, headers=headers, data=data) + response_text = response.get( + "choices")[0].get("message").get("content") + return response_text + + def _prepare_vision_data(self) -> dict: first_image = self.base64_images[0] - # Set headers and payload - headers = {'Content-type': 'application/json', - 'Authorization': 'Bearer ' + api_key} data = { "messages": [ { "role": "user", "content": [ {"type": "text", "text": self.message}, - { - "type": "image_url", - "image_url": {"url": f"data:image/jpeg;base64,{first_image}"} - } + {"type": "image_url", "image_url": { + "url": f"data:image/jpeg;base64,{first_image}"}} ] } ], - "model": model + "model": self.model } + return data - response = await self._post( - url=endpoint, headers=headers, data=data) + def _prepare_text_data(self) -> dict: + return { + "messages": [ + { + "role": "user", + "content": [ + {"type": "text", "text": self.message} + ] + } + ], + "model": self.model + } - print(response) +class LocalAI(Provider): + async def _make_request(self, api_key: str, endpoint: str, data: dict, ip_address: str, port: int, https: bool) -> str: + headers = self._generate_headers() + protocol = "https" if https else "http" + response = await 
self._post(url=endpoint.format(ip_address=ip_address, port=port, protocol=protocol), headers=headers, data=data) response_text = response.get( "choices")[0].get("message").get("content") return response_text - async def localai(self, model, ip_address, port, https): - data = {"model": model, - "messages": [{"role": "user", "content": [ - ]}], - "max_tokens": self.max_tokens, - "temperature": self.temperature - } + def _prepare_vision_data(self) -> dict: + data = {"model": self.model, "messages": [{"role": "user", "content": [ + ]}], "max_tokens": self.max_tokens, "temperature": self.temperature} for image, filename in zip(self.base64_images, self.filenames): tag = ("Image " + str(self.base64_images.index(image) + 1) - ) if filename == "" or not filename else filename + ) if filename == "" else filename data["messages"][0]["content"].append( {"type": "text", "text": tag + ":"}) data["messages"][0]["content"].append( {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image}"}}) - - # append the message to the end of the request data["messages"][0]["content"].append( - {"type": "text", "text": self.message} - ) - - protocol = "https" if https else "http" - response = await self._post( - url=ENDPOINT_LOCALAI.format(ip_address=ip_address, port=port, protocol=protocol), headers={}, data=data) - - response_text = response.get( - "choices")[0].get("message").get("content") - return response_text + {"type": "text", "text": self.message}) + return data - async def ollama(self, model, ip_address, port, https): - data = { - "model": model, - "messages": [], - "stream": False, - "options": { - "num_predict": self.max_tokens, - "temperature": self.temperature - } + def _prepare_text_data(self) -> dict: + return { + "model": self.model, + "messages": [{"role": "user", "content": [{"type": "text", "text": self.message}]}], + "max_tokens": self.max_tokens, + "temperature": self.temperature } - for image, filename in zip(self.base64_images, self.filenames): - tag = 
("Image " + str(self.base64_images.index(image) + 1) - ) if filename == "" or not filename else filename - image_message = { - "role": "user", - "content": tag + ":", - "images": [image] - } - data["messages"].append(image_message) - # append to the end of the request - prompt_message = { - "role": "user", - "content": self.message - } - data["messages"].append(prompt_message) +class Ollama(Provider): + async def _make_request(self, api_key: str, endpoint: str, data: dict, ip_address: str, port: int, https: bool) -> str: + headers = {} protocol = "https" if https else "http" - response = await self._post(url=ENDPOINT_OLLAMA.format(ip_address=ip_address, port=port, protocol=protocol), headers={}, data=data) + response = await self._post(url=endpoint.format(ip_address=ip_address, port=port, protocol=protocol), headers=headers, data=data) response_text = response.get("message").get("content") return response_text - # Helpers - async def _post(self, url, headers, data): - """Post data to url and return response data""" - _LOGGER.info(f"Request data: {sanitize_data(data)}") - - try: - response = await self.session.post(url, headers=headers, json=data) - except Exception as e: - raise ServiceValidationError(f"Request failed: {e}") - - if response.status != 200: - provider = inspect.stack()[1].function - parsed_response = await self._resolve_error(response, provider) - raise ServiceValidationError(parsed_response) - else: - response_data = await response.json() - _LOGGER.info(f"Response data: {response_data}") - return response_data - - async def _fetch(self, url, max_retries=2, retry_delay=1): - """Fetch image from url and return image data""" - retries = 0 - while retries < max_retries: - _LOGGER.info( - f"Fetching {url} (attempt {retries + 1}/{max_retries})") - try: - response = await self.session.get(url) - if response.status != 200: - _LOGGER.warning( - f"Couldn't fetch frame (status code: {response.status})") - retries += 1 - await asyncio.sleep(retry_delay) - 
continue - data = await response.read() - return data - except Exception as e: - _LOGGER.error(f"Fetch failed: {e}") - retries += 1 - await asyncio.sleep(retry_delay) - _LOGGER.warning(f"Failed to fetch {url} after {max_retries} retries") - return None - - def _validate_call(self, provider, api_key, base64_images, ip_address=None, port=None): - """Validate the service call data""" - # Checks for OpenAI - if provider == 'OpenAI': - if not api_key: - raise ServiceValidationError(ERROR_OPENAI_NOT_CONFIGURED) - # Checks for Anthropic - elif provider == 'Anthropic': - if not api_key: - raise ServiceValidationError(ERROR_ANTHROPIC_NOT_CONFIGURED) - elif provider == 'Google': - if not api_key: - raise ServiceValidationError(ERROR_GOOGLE_NOT_CONFIGURED) - # Checks for Groq - elif provider == 'Groq': - if not api_key: - raise ServiceValidationError(ERROR_GROQ_NOT_CONFIGURED) - if len(base64_images) > 1: - raise ServiceValidationError(ERROR_GROQ_MULTIPLE_IMAGES) - # Checks for LocalAI - elif provider == 'LocalAI': - if not ip_address or not port: - raise ServiceValidationError(ERROR_LOCALAI_NOT_CONFIGURED) - # Checks for Ollama - elif provider == 'Ollama': - if not ip_address or not port: - raise ServiceValidationError(ERROR_OLLAMA_NOT_CONFIGURED) - elif provider == 'Custom OpenAI': - pass - else: - raise ServiceValidationError( - "Invalid provider selected. 
The event calendar cannot be used for analysis.") - # Check media input - if base64_images == []: - raise ServiceValidationError(ERROR_NO_IMAGE_INPUT) - - async def _resolve_error(self, response, provider): - """Translate response status to error message""" - import json - full_response_text = await response.text() - _LOGGER.info(f"[INFO] Full Response: {full_response_text}") - - try: - response_json = json.loads(full_response_text) - if provider == 'anthropic': - error_info = response_json.get('error', {}) - error_message = f"{error_info.get('type', 'Unknown error')}: {error_info.get('message', 'Unknown error')}" - elif provider == 'ollama': - error_message = response_json.get('error', 'Unknown error') - else: - error_info = response_json.get('error', {}) - error_message = error_info.get('message', 'Unknown error') - except json.JSONDecodeError: - error_message = 'Unknown error' + def _prepare_vision_data(self) -> dict: + data = {"model": self.model, "messages": [], "stream": False, "options": { + "num_predict": self.max_tokens, "temperature": self.temperature}} + for image, filename in zip(self.base64_images, self.filenames): + tag = ("Image " + str(self.base64_images.index(image) + 1) + ) if filename == "" else filename + image_message = {"role": "user", + "content": tag + ":", "images": [image]} + data["messages"].append(image_message) + prompt_message = {"role": "user", "content": self.message} + data["messages"].append(prompt_message) + return data - return error_message + def _prepare_text_data(self) -> dict: + return { + "model": self.model, + "messages": [{"role": "user", "content": self.message}], + "stream": False, + "options": {"num_predict": self.max_tokens, "temperature": self.temperature} + } From 02a153452635c3fd601da951d16edd997a040285 Mon Sep 17 00:00:00 2001 From: valentinfrlch Date: Tue, 12 Nov 2024 08:56:50 +0100 Subject: [PATCH 2/9] WIP: Refactoring providers into classes --- custom_components/llmvision/__init__.py | 2 +- 
custom_components/llmvision/providers.py | 103 ++++++++++++++++++----- 2 files changed, 82 insertions(+), 23 deletions(-) diff --git a/custom_components/llmvision/__init__.py b/custom_components/llmvision/__init__.py index fbbd33c..b168bd8 100644 --- a/custom_components/llmvision/__init__.py +++ b/custom_components/llmvision/__init__.py @@ -110,7 +110,7 @@ async def async_remove_entry(hass, entry): if entry_uid in hass.data[DOMAIN]: # Remove the entry from hass.data _LOGGER.info(f"Removing {entry.title} from hass.data") - async_unload_entry(hass, entry) + await async_unload_entry(hass, entry) hass.data[DOMAIN].pop(entry_uid) else: _LOGGER.warning( diff --git a/custom_components/llmvision/providers.py b/custom_components/llmvision/providers.py index f2772ad..9acecbc 100644 --- a/custom_components/llmvision/providers.py +++ b/custom_components/llmvision/providers.py @@ -85,7 +85,7 @@ def default_model(provider): return { "LocalAI": "gpt-4-vision-preview", "Ollama": "llava-phi3:latest", "Custom OpenAI": "gpt-4o-mini" -}.get(provider, "gpt-4o-mini") # Default value if provider is not found +}.get(provider, "gpt-4o-mini") # Default value class RequestHandler: @@ -108,35 +108,39 @@ async def forward_request(self, call): if provider == 'OpenAI': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_OPENAI_API_KEY) + self._validate_call( provider=provider, api_key=api_key, base64_images=self.base64_images) request = OpenAI(self.session, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames, self.detail) - response_text = await request.vision_request(api_key, ENDPOINT_OPENAI) + response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_OPENAI) elif provider == 'Anthropic': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_ANTHROPIC_API_KEY) + self._validate_call( provider=provider, api_key=api_key, base64_images=self.base64_images) request = Anthropic(self.session, model, self.max_tokens, 
self.temperature, self.message, self.base64_images, self.filenames, self.detail) - response_text = await request.vision_request(api_key, ENDPOINT_ANTHROPIC) + response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_ANTHROPIC) elif provider == 'Google': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_GOOGLE_API_KEY) + self._validate_call( provider=provider, api_key=api_key, base64_images=self.base64_images) request = Google(self.session, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames, self.detail) - response_text = await request.vision_request(api_key, ENDPOINT_GOOGLE) + response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GOOGLE) elif provider == 'Groq': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_GROQ_API_KEY) + self._validate_call( provider=provider, api_key=api_key, base64_images=self.base64_images) request = Groq(self.session, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames, self.detail) - response_text = await request.vision_request(api_key, ENDPOINT_GROQ) + response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GROQ) elif provider == 'LocalAI': ip_address = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_LOCALAI_IP_ADDRESS) @@ -144,11 +148,12 @@ async def forward_request(self, call): entry_id).get(CONF_LOCALAI_PORT) https = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_LOCALAI_HTTPS, False) + self._validate_call(provider=provider, api_key=None, base64_images=self.base64_images, ip_address=ip_address, port=port) request = LocalAI(self.session, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames, self.detail) - response_text = await request.vision_request(ip_address=ip_address, port=port, https=https) + response_text = await request.vision_request(endpoint=ENDPOINT_LOCALAI, ip_address=ip_address, port=port, https=https) elif 
provider == 'Ollama': ip_address = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_OLLAMA_IP_ADDRESS) @@ -160,7 +165,7 @@ async def forward_request(self, call): base64_images=self.base64_images, ip_address=ip_address, port=port) request = Ollama(self.session, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames, self.detail) - response_text = await request.vision_request(ip_address=ip_address, port=port, https=https) + response_text = await request.vision_request(endpoint=ENDPOINT_OLLAMA, ip_address=ip_address, port=port, https=https) elif provider == 'Custom OpenAI': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_CUSTOM_OPENAI_API_KEY, "") @@ -249,26 +254,26 @@ def __init__(self, session, model, max_tokens, temperature, message, base64_imag self.detail = detail @abstractmethod - async def vision_request(self, api_key, endpoint): - pass - - @abstractmethod - async def text_request(self, api_key, endpoint): + async def _make_request(self, **kwargs) -> str: pass - async def vision_request(self, api_key: str, endpoint: str, **kwargs) -> str: + async def vision_request(self, **kwargs) -> str: data = self._prepare_vision_data() - return await self._make_request(api_key, endpoint, data, **kwargs) + kwargs["data"] = data + _LOGGER.info(f"kwargs: {kwargs.items()}") + return await self._make_request(**kwargs) - async def text_request(self, api_key: str, endpoint: str, **kwargs) -> str: + async def text_request(self, **kwargs) -> str: data = self._prepare_text_data() - return await self._make_request(api_key, endpoint, data, **kwargs) + kwargs["data"] = data + return await self._make_request(**kwargs) async def _post(self, url, headers, data): """Post data to url and return response data""" _LOGGER.info(f"Request data: {sanitize_data(data)}") try: + _LOGGER.info(f"Posting to {url} with headers {headers}") response = await self.session.post(url, headers=headers, json=data) except Exception as e: raise 
ServiceValidationError(f"Request failed: {e}") @@ -329,10 +334,18 @@ async def _fetch(self, url, max_retries=2, retry_delay=1): class OpenAI(Provider): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + super().__init__(session, model, max_tokens, temperature, + message, base64_images, filenames, detail) + def _generate_headers(self, api_key: str) -> dict: return {'Content-type': 'application/json', 'Authorization': 'Bearer ' + api_key} - async def _make_request(self, api_key: str, endpoint: str, data: dict) -> str: + async def _make_request(self, **kwargs) -> str: + api_key = kwargs.get("api_key") + endpoint = kwargs.get("endpoint") + data = kwargs.get("data") + headers = self._generate_headers(api_key) response = await self._post(url=endpoint, headers=headers, data=data) response_text = response.get( @@ -363,6 +376,10 @@ def _prepare_text_data(self) -> dict: class Anthropic(Provider): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + super().__init__(session, model, max_tokens, temperature, + message, base64_images, filenames, detail) + def _generate_headers(self, api_key: str) -> dict: return { 'content-type': 'application/json', @@ -370,7 +387,11 @@ def _generate_headers(self, api_key: str) -> dict: 'anthropic-version': VERSION_ANTHROPIC } - async def _make_request(self, api_key: str, endpoint: str, data: dict) -> str: + async def _make_request(self, **kwargs) -> str: + api_key = kwargs.get("api_key") + endpoint = kwargs.get("endpoint") + data = kwargs.get("data") + headers = self._generate_headers(api_key) response = await self._post(url=endpoint, headers=headers, data=data) response_text = response.get("content")[0].get("text") @@ -404,10 +425,18 @@ def _prepare_text_data(self) -> dict: class Google(Provider): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + super().__init__(session, 
model, max_tokens, temperature, + message, base64_images, filenames, detail) + def _generate_headers(self) -> dict: return {'content-type': 'application/json'} - async def _make_request(self, api_key: str, endpoint: str, data: dict) -> str: + async def _make_request(self, **kwargs) -> str: + api_key = kwargs.get("api_key") + endpoint = kwargs.get("endpoint") + data = kwargs.get("data") + headers = self._generate_headers() response = await self._post(url=endpoint.format(model=self.model, api_key=api_key), headers=headers, data=data) response_text = response.get("candidates")[0].get( @@ -434,10 +463,18 @@ def _prepare_text_data(self) -> dict: class Groq(Provider): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + super().__init__(session, model, max_tokens, temperature, + message, base64_images, filenames, detail) + def _generate_headers(self, api_key: str) -> dict: return {'Content-type': 'application/json', 'Authorization': 'Bearer ' + api_key} - async def _make_request(self, api_key: str, endpoint: str, data: dict) -> str: + async def _make_request(self, **kwargs) -> str: + api_key = kwargs.get("api_key") + endpoint = kwargs.get("endpoint") + data = kwargs.get("data") + headers = self._generate_headers(api_key) response = await self._post(url=endpoint, headers=headers, data=data) response_text = response.get( @@ -476,7 +513,17 @@ def _prepare_text_data(self) -> dict: class LocalAI(Provider): - async def _make_request(self, api_key: str, endpoint: str, data: dict, ip_address: str, port: int, https: bool) -> str: + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + super().__init__(session, model, max_tokens, temperature, + message, base64_images, filenames, detail) + + async def _make_request(self, **kwargs) -> str: + endpoint = kwargs.get("endpoint") + data = kwargs.get("data") + https = kwargs.get("https") + ip_address = kwargs.get("ip_address") + port 
= kwargs.get("port") + headers = self._generate_headers() protocol = "https" if https else "http" response = await self._post(url=endpoint.format(ip_address=ip_address, port=port, protocol=protocol), headers=headers, data=data) @@ -508,7 +555,19 @@ def _prepare_text_data(self) -> dict: class Ollama(Provider): - async def _make_request(self, api_key: str, endpoint: str, data: dict, ip_address: str, port: int, https: bool) -> str: + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + super().__init__(session, model, max_tokens, temperature, + message, base64_images, filenames, detail) + + async def _make_request(self, **kwargs) -> str: + endpoint = kwargs.get("endpoint") + data = kwargs.get("data") + https = kwargs.get("https") + ip_address = kwargs.get("ip_address") + port = kwargs.get("port") + + _LOGGER.info(f"endpoint: {endpoint} https: {https} ip_address: {ip_address} port: {port}") + headers = {} protocol = "https" if https else "http" response = await self._post(url=endpoint.format(ip_address=ip_address, port=port, protocol=protocol), headers=headers, data=data) From e4992007bfe396390a92b7f6d3d5b4384a5e47e6 Mon Sep 17 00:00:00 2001 From: valentinfrlch Date: Wed, 13 Nov 2024 14:44:53 +0100 Subject: [PATCH 3/9] Support Azure Open AI #64 Use openai python library --- custom_components/llmvision/__init__.py | 10 +- custom_components/llmvision/const.py | 1 - custom_components/llmvision/manifest.json | 3 +- custom_components/llmvision/providers.py | 243 ++++++++++------------ custom_components/llmvision/services.yaml | 44 ---- 5 files changed, 117 insertions(+), 184 deletions(-) diff --git a/custom_components/llmvision/__init__.py b/custom_components/llmvision/__init__.py index b168bd8..42fe6c6 100644 --- a/custom_components/llmvision/__init__.py +++ b/custom_components/llmvision/__init__.py @@ -28,7 +28,6 @@ DURATION, MAX_FRAMES, TEMPERATURE, - DETAIL, INCLUDE_FILENAME, EXPOSE_IMAGES, SENSOR_ENTITY, @@ -231,7 
+230,6 @@ def __init__(self, data_call): self.target_width = data_call.data.get(TARGET_WIDTH, 3840) self.temperature = float(data_call.data.get(TEMPERATURE, 0.3)) self.max_tokens = int(data_call.data.get(MAXTOKENS, 100)) - self.detail = str(data_call.data.get(DETAIL, "auto")) self.include_filename = data_call.data.get(INCLUDE_FILENAME, False) self.expose_images = data_call.data.get(EXPOSE_IMAGES, False) self.sensor_entity = data_call.data.get(SENSOR_ENTITY) @@ -252,7 +250,7 @@ async def image_analyzer(data_call): message=call.message, max_tokens=call.max_tokens, temperature=call.temperature, - detail=call.detail) + ) # Fetch and preprocess images processor = MediaProcessor(hass, client) @@ -278,7 +276,7 @@ async def video_analyzer(data_call): message=call.message, max_tokens=call.max_tokens, temperature=call.temperature, - detail=call.detail) + ) processor = MediaProcessor(hass, client) client = await processor.add_videos(video_paths=call.video_paths, event_ids=call.event_id, @@ -300,7 +298,7 @@ async def stream_analyzer(data_call): message=call.message, max_tokens=call.max_tokens, temperature=call.temperature, - detail=call.detail) + ) processor = MediaProcessor(hass, client) client = await processor.add_streams(image_entities=call.image_entities, duration=call.duration, @@ -350,7 +348,7 @@ def is_number(s): message=message, max_tokens=call.max_tokens, temperature=call.temperature, - detail=call.detail) + ) processor = MediaProcessor(hass, client) client = await processor.add_visual_data(image_entities=call.image_entities, image_paths=call.image_paths, diff --git a/custom_components/llmvision/const.py b/custom_components/llmvision/const.py index 4bbd7f8..09b21e4 100644 --- a/custom_components/llmvision/const.py +++ b/custom_components/llmvision/const.py @@ -32,7 +32,6 @@ INTERVAL = 'interval' DURATION = 'duration' MAX_FRAMES = 'max_frames' -DETAIL = 'detail' TEMPERATURE = 'temperature' INCLUDE_FILENAME = 'include_filename' EXPOSE_IMAGES = 'expose_images' diff 
--git a/custom_components/llmvision/manifest.json b/custom_components/llmvision/manifest.json index aca2eb5..e6074bb 100644 --- a/custom_components/llmvision/manifest.json +++ b/custom_components/llmvision/manifest.json @@ -2,9 +2,10 @@ "domain": "llmvision", "name": "LLM Vision", "codeowners": ["@valentinfrlch"], + "requirements": ["openai==1.54.3"], "config_flow": true, "documentation": "https://github.com/valentinfrlch/ha-llmvision", "iot_class": "cloud_polling", "issue_tracker": "https://github.com/valentinfrlch/ha-llmvision/issues", - "version": "1.3.1" + "version": "1.3.2" } \ No newline at end of file diff --git a/custom_components/llmvision/providers.py b/custom_components/llmvision/providers.py index 9acecbc..aad7dc6 100644 --- a/custom_components/llmvision/providers.py +++ b/custom_components/llmvision/providers.py @@ -1,4 +1,5 @@ from abc import ABC, abstractmethod +import openai from homeassistant.exceptions import ServiceValidationError from homeassistant.helpers.aiohttp_client import async_get_clientsession import logging @@ -19,7 +20,6 @@ CONF_CUSTOM_OPENAI_ENDPOINT, CONF_CUSTOM_OPENAI_API_KEY, VERSION_ANTHROPIC, - ENDPOINT_OPENAI, ENDPOINT_ANTHROPIC, ENDPOINT_GOOGLE, ENDPOINT_LOCALAI, @@ -89,13 +89,12 @@ def default_model(provider): return { class RequestHandler: - def __init__(self, hass, message, max_tokens, temperature, detail): + def __init__(self, hass, message, max_tokens, temperature): self.session = async_get_clientsession(hass) self.hass = hass self.message = message self.max_tokens = max_tokens self.temperature = temperature - self.detail = detail self.base64_images = [] self.filenames = [] @@ -109,37 +108,29 @@ async def forward_request(self, call): api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_OPENAI_API_KEY) - self._validate_call( - provider=provider, api_key=api_key, base64_images=self.base64_images) request = OpenAI(self.session, model, self.max_tokens, self.temperature, - self.message, self.base64_images, 
self.filenames, self.detail) - response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_OPENAI) + self.message, self.base64_images, self.filenames) + response_text = await request.vision_request(api_key=api_key) elif provider == 'Anthropic': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_ANTHROPIC_API_KEY) - self._validate_call( - provider=provider, api_key=api_key, base64_images=self.base64_images) request = Anthropic(self.session, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames, self.detail) + self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_ANTHROPIC) elif provider == 'Google': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_GOOGLE_API_KEY) - self._validate_call( - provider=provider, api_key=api_key, base64_images=self.base64_images) request = Google(self.session, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames, self.detail) + self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GOOGLE) elif provider == 'Groq': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_GROQ_API_KEY) - self._validate_call( - provider=provider, api_key=api_key, base64_images=self.base64_images) request = Groq(self.session, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames, self.detail) + self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GROQ) elif provider == 'LocalAI': ip_address = self.hass.data.get(DOMAIN).get( @@ -149,10 +140,8 @@ async def forward_request(self, call): https = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_LOCALAI_HTTPS, False) - self._validate_call(provider=provider, api_key=None, - base64_images=self.base64_images, ip_address=ip_address, 
port=port) request = LocalAI(self.session, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames, self.detail) + self.message, self.base64_images, self.filenames) response_text = await request.vision_request(endpoint=ENDPOINT_LOCALAI, ip_address=ip_address, port=port, https=https) elif provider == 'Ollama': ip_address = self.hass.data.get(DOMAIN).get( @@ -161,20 +150,18 @@ async def forward_request(self, call): entry_id).get(CONF_OLLAMA_PORT) https = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_OLLAMA_HTTPS, False) - self._validate_call(provider=provider, api_key=None, - base64_images=self.base64_images, ip_address=ip_address, port=port) + request = Ollama(self.session, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames, self.detail) + self.message, self.base64_images, self.filenames) response_text = await request.vision_request(endpoint=ENDPOINT_OLLAMA, ip_address=ip_address, port=port, https=https) elif provider == 'Custom OpenAI': api_key = self.hass.data.get(DOMAIN).get( entry_id).get(CONF_CUSTOM_OPENAI_API_KEY, "") endpoint = self.hass.data.get(DOMAIN).get(entry_id).get( CONF_CUSTOM_OPENAI_ENDPOINT) + "/v1/chat/completions" - self._validate_call( - provider=provider, api_key=api_key, base64_images=self.base64_images) + request = OpenAI(self.session, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames, self.detail) + self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key, endpoint) else: raise ServiceValidationError("invalid_provider") @@ -184,42 +171,6 @@ def add_frame(self, base64_image, filename): self.base64_images.append(base64_image) self.filenames.append(filename) - def _validate_call(self, provider, api_key, base64_images, ip_address=None, port=None): - """Validate the service call data""" - # Checks for OpenAI - if provider == 'OpenAI': - if not api_key: - raise 
ServiceValidationError(ERROR_OPENAI_NOT_CONFIGURED) - # Checks for Anthropic - elif provider == 'Anthropic': - if not api_key: - raise ServiceValidationError(ERROR_ANTHROPIC_NOT_CONFIGURED) - elif provider == 'Google': - if not api_key: - raise ServiceValidationError(ERROR_GOOGLE_NOT_CONFIGURED) - # Checks for Groq - elif provider == 'Groq': - if not api_key: - raise ServiceValidationError(ERROR_GROQ_NOT_CONFIGURED) - if len(base64_images) > 1: - raise ServiceValidationError(ERROR_GROQ_MULTIPLE_IMAGES) - # Checks for LocalAI - elif provider == 'LocalAI': - if not ip_address or not port: - raise ServiceValidationError(ERROR_LOCALAI_NOT_CONFIGURED) - # Checks for Ollama - elif provider == 'Ollama': - if not ip_address or not port: - raise ServiceValidationError(ERROR_OLLAMA_NOT_CONFIGURED) - elif provider == 'Custom OpenAI': - pass - else: - raise ServiceValidationError( - "Invalid provider selected. The event calendar cannot be used for analysis.") - # Check media input - if base64_images == []: - raise ServiceValidationError(ERROR_NO_IMAGE_INPUT) - async def _resolve_error(self, response, provider): """Translate response status to error message""" import json @@ -243,7 +194,7 @@ async def _resolve_error(self, response, provider): class Provider(ABC): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): self.session = session self.model = model self.max_tokens = max_tokens @@ -251,24 +202,33 @@ def __init__(self, session, model, max_tokens, temperature, message, base64_imag self.message = message self.base64_images = base64_images self.filenames = filenames - self.detail = detail @abstractmethod async def _make_request(self, **kwargs) -> str: pass + @abstractmethod + def validate(self) -> bool: + pass + + def validate_images(self): + if not self.base64_images or len(self.base64_images) == 0: + raise 
ServiceValidationError(ERROR_NO_IMAGE_INPUT) + async def vision_request(self, **kwargs) -> str: - data = self._prepare_vision_data() - kwargs["data"] = data + self.validate_images() + self.validate(**kwargs) + kwargs["data"] = self._prepare_vision_data() _LOGGER.info(f"kwargs: {kwargs.items()}") return await self._make_request(**kwargs) async def text_request(self, **kwargs) -> str: - data = self._prepare_text_data() - kwargs["data"] = data + self.validate_images() + self.validate(**kwargs) + kwargs["data"] = self._prepare_text_data() return await self._make_request(**kwargs) - async def _post(self, url, headers, data): + async def _post(self, url, headers, data) -> dict: """Post data to url and return response data""" _LOGGER.info(f"Request data: {sanitize_data(data)}") @@ -288,27 +248,6 @@ async def _post(self, url, headers, data): _LOGGER.info(f"Response data: {response_data}") return response_data - async def _resolve_error(self, response, provider): - """Translate response status to error message""" - import json - full_response_text = await response.text() - _LOGGER.info(f"[INFO] Full Response: {full_response_text}") - - try: - response_json = json.loads(full_response_text) - if provider == 'anthropic': - error_info = response_json.get('error', {}) - error_message = f"{error_info.get('type', 'Unknown error')}: {error_info.get('message', 'Unknown error')}" - elif provider == 'ollama': - error_message = response_json.get('error', 'Unknown error') - else: - error_info = response_json.get('error', {}) - error_message = error_info.get('message', 'Unknown error') - except json.JSONDecodeError: - error_message = 'Unknown error' - - return error_message - async def _fetch(self, url, max_retries=2, retry_delay=1): """Fetch image from url and return image data""" retries = 0 @@ -330,55 +269,73 @@ async def _fetch(self, url, max_retries=2, retry_delay=1): retries += 1 await asyncio.sleep(retry_delay) _LOGGER.warning(f"Failed to fetch {url} after {max_retries} retries") 
- return None + + async def _resolve_error(self, response, provider) -> str: + """Translate response status to error message""" + import json + full_response_text = await response.text() + _LOGGER.info(f"[INFO] Full Response: {full_response_text}") + + try: + response_json = json.loads(full_response_text) + if provider == 'anthropic': + error_info = response_json.get('error', {}) + error_message = f"{error_info.get('type', 'Unknown error')}: {error_info.get('message', 'Unknown error')}" + elif provider == 'ollama': + error_message = response_json.get('error', 'Unknown error') + else: + error_info = response_json.get('error', {}) + error_message = error_info.get('message', 'Unknown error') + except json.JSONDecodeError: + error_message = 'Unknown error' + + return error_message class OpenAI(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): super().__init__(session, model, max_tokens, temperature, - message, base64_images, filenames, detail) - - def _generate_headers(self, api_key: str) -> dict: - return {'Content-type': 'application/json', 'Authorization': 'Bearer ' + api_key} + message, base64_images, filenames) async def _make_request(self, **kwargs) -> str: - api_key = kwargs.get("api_key") - endpoint = kwargs.get("endpoint") - data = kwargs.get("data") - - headers = self._generate_headers(api_key) - response = await self._post(url=endpoint, headers=headers, data=data) - response_text = response.get( - "choices")[0].get("message").get("content") + openai.api_key = kwargs.get("api_key") + messages = kwargs.get("data") + if "endpoint" in kwargs: + openai.base_url = kwargs.get("endpoint") + + response = openai.chat.completions.create( + model=self.model, + messages=messages, + max_tokens=self.max_tokens, + temperature=self.temperature, + ) + + response_text = response.choices[0].message.content return 
response_text - def _prepare_vision_data(self) -> dict: - data = {"model": self.model, "messages": [{"role": "user", "content": [ - ]}], "max_tokens": self.max_tokens, "temperature": self.temperature} + def _prepare_vision_data(self) -> list: + messages = [{"role": "user", "content": []}] for image, filename in zip(self.base64_images, self.filenames): tag = ("Image " + str(self.base64_images.index(image) + 1) ) if filename == "" else filename - data["messages"][0]["content"].append( - {"type": "text", "text": tag + ":"}) - data["messages"][0]["content"].append({"type": "image_url", "image_url": { - "url": f"data:image/jpeg;base64,{image}", "detail": self.detail}}) - data["messages"][0]["content"].append( - {"type": "text", "text": self.message}) - return data - - def _prepare_text_data(self) -> dict: - return { - "model": self.model, - "messages": [{"role": "user", "content": [{"type": "text", "text": self.message}]}], - "max_tokens": self.max_tokens, - "temperature": self.temperature - } - + messages[0]["content"].append({"type": "text", "text": tag + ":"}) + messages[0]["content"].append({"type": "image_url", "image_url": { + "url": f"data:image/jpeg;base64,{image}"}}) + messages[0]["content"].append({"type": "text", "text": self.message}) + return messages + + def _prepare_text_data(self) -> list: + return [{"role": "user", "content": [{"type": "text", "text": self.message}]}] + + def validate(self, **kwargs): + if not kwargs.get("api_key"): + raise ServiceValidationError(ERROR_OPENAI_NOT_CONFIGURED) + class Anthropic(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): super().__init__(session, model, max_tokens, temperature, - message, base64_images, filenames, detail) + message, base64_images, filenames) def _generate_headers(self, api_key: str) -> dict: return { @@ -423,11 +380,15 @@ def 
_prepare_text_data(self) -> dict: "temperature": self.temperature } + def validate(self, **kwargs): + if not kwargs.get("api_key"): + raise ServiceValidationError(ERROR_ANTHROPIC_NOT_CONFIGURED) + class Google(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): super().__init__(session, model, max_tokens, temperature, - message, base64_images, filenames, detail) + message, base64_images, filenames) def _generate_headers(self) -> dict: return {'content-type': 'application/json'} @@ -460,12 +421,16 @@ def _prepare_text_data(self) -> dict: "contents": [{"role": "user", "parts": [{"text": self.message + ":"}]}], "generationConfig": {"maxOutputTokens": self.max_tokens, "temperature": self.temperature} } + + def validate(self, **kwargs): + if not kwargs.get("api_key"): + raise ServiceValidationError(ERROR_GOOGLE_NOT_CONFIGURED) class Groq(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): super().__init__(session, model, max_tokens, temperature, - message, base64_images, filenames, detail) + message, base64_images, filenames) def _generate_headers(self, api_key: str) -> dict: return {'Content-type': 'application/json', 'Authorization': 'Bearer ' + api_key} @@ -510,12 +475,18 @@ def _prepare_text_data(self) -> dict: ], "model": self.model } + + def validate(self, **kwargs): + if not kwargs.get("api_key"): + raise ServiceValidationError(ERROR_GROQ_NOT_CONFIGURED) + if len(kwargs.get("base64_images")) > 1: + raise ServiceValidationError(ERROR_GROQ_MULTIPLE_IMAGES) class LocalAI(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + def __init__(self, session, model, max_tokens, temperature, 
message, base64_images, filenames): super().__init__(session, model, max_tokens, temperature, - message, base64_images, filenames, detail) + message, base64_images, filenames) async def _make_request(self, **kwargs) -> str: endpoint = kwargs.get("endpoint") @@ -552,12 +523,16 @@ def _prepare_text_data(self) -> dict: "max_tokens": self.max_tokens, "temperature": self.temperature } + + def validate(self, **kwargs): + if not kwargs.get("ip_address") or not kwargs.get("port"): + raise ServiceValidationError(ERROR_LOCALAI_NOT_CONFIGURED) class Ollama(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames, detail): + def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): super().__init__(session, model, max_tokens, temperature, - message, base64_images, filenames, detail) + message, base64_images, filenames) async def _make_request(self, **kwargs) -> str: endpoint = kwargs.get("endpoint") @@ -594,3 +569,7 @@ def _prepare_text_data(self) -> dict: "stream": False, "options": {"num_predict": self.max_tokens, "temperature": self.temperature} } + + def validate(self, **kwargs): + if not kwargs.get("ip_address") or not kwargs.get("port"): + raise ServiceValidationError(ERROR_OLLAMA_NOT_CONFIGURED) \ No newline at end of file diff --git a/custom_components/llmvision/services.yaml b/custom_components/llmvision/services.yaml index 78ad818..e6bfc64 100644 --- a/custom_components/llmvision/services.yaml +++ b/custom_components/llmvision/services.yaml @@ -68,17 +68,6 @@ image_analyzer: number: min: 512 max: 1920 - detail: - name: Detail - required: false - description: "Detail parameter. 
Leave empty for 'auto'" - default: 'low' - example: 'low' - selector: - select: - options: - - 'high' - - 'low' max_tokens: name: Maximum Tokens description: 'Maximum number of tokens to generate' @@ -189,17 +178,6 @@ video_analyzer: number: min: 512 max: 1920 - detail: - name: Detail - required: false - description: "Detail parameter, leave empty for 'auto'" - default: 'low' - example: 'low' - selector: - select: - options: - - 'high' - - 'low' max_tokens: name: Maximum Tokens description: 'Maximum number of tokens to generate' @@ -313,17 +291,6 @@ stream_analyzer: number: min: 512 max: 1920 - detail: - name: Detail - required: false - description: "Detail parameter, leave empty for 'auto'" - default: 'low' - example: 'low' - selector: - select: - options: - - 'high' - - 'low' max_tokens: name: Maximum Tokens description: 'Maximum number of tokens to generate' @@ -426,17 +393,6 @@ data_analyzer: number: min: 512 max: 1920 - detail: - name: Detail - required: false - description: "Detail parameter. Leave empty for 'auto'" - default: 'high' - example: 'high' - selector: - select: - options: - - 'high' - - 'low' max_tokens: name: Maximum Tokens description: 'Maximum number of tokens to generate. A low value is recommended since this will likely result in a number.' 
From bdba949ad3f4723c087cc456d5c35834f1540b05 Mon Sep 17 00:00:00 2001 From: valentinfrlch Date: Wed, 13 Nov 2024 21:42:34 +0100 Subject: [PATCH 4/9] genAI titles --- custom_components/llmvision/__init__.py | 156 ++++++++++------------ custom_components/llmvision/const.py | 1 + custom_components/llmvision/manifest.json | 2 +- custom_components/llmvision/providers.py | 143 +++++++++++--------- custom_components/llmvision/services.yaml | 7 + 5 files changed, 155 insertions(+), 154 deletions(-) diff --git a/custom_components/llmvision/__init__.py b/custom_components/llmvision/__init__.py index 42fe6c6..c8329dc 100644 --- a/custom_components/llmvision/__init__.py +++ b/custom_components/llmvision/__init__.py @@ -30,13 +30,14 @@ TEMPERATURE, INCLUDE_FILENAME, EXPOSE_IMAGES, + GENERATE_TITLE, SENSOR_ENTITY, ) from .calendar import SemanticIndex from datetime import timedelta from homeassistant.util import dt as dt_util from homeassistant.config_entries import ConfigEntry -from .providers import RequestHandler +from .providers import Request from .media_handlers import MediaProcessor from homeassistant.core import SupportsResponse from homeassistant.exceptions import ServiceValidationError @@ -146,35 +147,8 @@ async def _remember(hass, call, start, response): f"'Event Calendar' config not found") semantic_index = SemanticIndex(hass, config_entry) - # Define a mapping of keywords to labels - keyword_to_label = { - "person": "Person", - "man": "Person", - "woman": "Person", - "individual": "Person", - "delivery": "Delivery", - "courier": "Courier", - "package": "Package", - "car": "Car", - "vehicle": "Car", - "bike": "Bike", - "bicycle": "Bike", - "bus": "Bus", - "truck": "Truck", - "motorcycle": "Motorcycle", - "bicycle": "Bicycle", - "dog": "Dog", - "cat": "Cat", - } - - # Default label - label = "Unknown object" - - # Check each keyword in the response text and update the label accordingly - for keyword, mapped_label in keyword_to_label.items(): - if keyword in 
response["response_text"].lower(): - label = mapped_label - break + + title = response.get("title", "Unknown object seen") if call.image_entities and len(call.image_entities) > 0: camera_name = call.image_entities[0] @@ -189,7 +163,7 @@ async def _remember(hass, call, start, response): await semantic_index.remember( start=start, end=dt_util.now() + timedelta(minutes=1), - label=label + " seen", + label=title, camera_name=camera_name, summary=response["response_text"] ) @@ -198,7 +172,8 @@ async def _remember(hass, call, start, response): async def _update_sensor(hass, sensor_entity, new_value): """Update the value of a sensor entity.""" if sensor_entity: - _LOGGER.info(f"Updating sensor {sensor_entity} with new value: {new_value}") + _LOGGER.info( + f"Updating sensor {sensor_entity} with new value: {new_value}") try: hass.states.async_set(sensor_entity, new_value) except Exception as e: @@ -232,6 +207,7 @@ def __init__(self, data_call): self.max_tokens = int(data_call.data.get(MAXTOKENS, 100)) self.include_filename = data_call.data.get(INCLUDE_FILENAME, False) self.expose_images = data_call.data.get(EXPOSE_IMAGES, False) + self.generate_title = data_call.data.get(GENERATE_TITLE, False) self.sensor_entity = data_call.data.get(SENSOR_ENTITY) def get_service_call_data(self): @@ -246,24 +222,24 @@ async def image_analyzer(data_call): # Initialize call object with service call data call = ServiceCallData(data_call).get_service_call_data() # Initialize the RequestHandler client - client = RequestHandler(hass, - message=call.message, - max_tokens=call.max_tokens, - temperature=call.temperature, - ) + request = Request(hass=hass, + message=call.message, + max_tokens=call.max_tokens, + temperature=call.temperature, + ) # Fetch and preprocess images - processor = MediaProcessor(hass, client) + processor = MediaProcessor(hass, request) # Send images to RequestHandler client - client = await processor.add_images(image_entities=call.image_entities, - 
image_paths=call.image_paths, - target_width=call.target_width, - include_filename=call.include_filename, - expose_images=call.expose_images - ) + request = await processor.add_images(image_entities=call.image_entities, + image_paths=call.image_paths, + target_width=call.target_width, + include_filename=call.include_filename, + expose_images=call.expose_images + ) # Validate configuration, input data and make the call - response = await client.forward_request(call) + response = await request.call(call) await _remember(hass, call, start, response) return response @@ -272,20 +248,20 @@ async def video_analyzer(data_call): start = dt_util.now() call = ServiceCallData(data_call).get_service_call_data() call.message = "The attached images are frames from a video. " + call.message - client = RequestHandler(hass, - message=call.message, - max_tokens=call.max_tokens, - temperature=call.temperature, - ) - processor = MediaProcessor(hass, client) - client = await processor.add_videos(video_paths=call.video_paths, - event_ids=call.event_id, - max_frames=call.max_frames, - target_width=call.target_width, - include_filename=call.include_filename, - expose_images=call.expose_images - ) - response = await client.forward_request(call) + request = Request(hass, + message=call.message, + max_tokens=call.max_tokens, + temperature=call.temperature, + ) + processor = MediaProcessor(hass, request) + request = await processor.add_videos(video_paths=call.video_paths, + event_ids=call.event_id, + max_frames=call.max_frames, + target_width=call.target_width, + include_filename=call.include_filename, + expose_images=call.expose_images + ) + response = await request.call(call) await _remember(hass, call, start, response) return response @@ -294,21 +270,21 @@ async def stream_analyzer(data_call): start = dt_util.now() call = ServiceCallData(data_call).get_service_call_data() call.message = "The attached images are frames from a live camera feed. 
" + call.message - client = RequestHandler(hass, - message=call.message, - max_tokens=call.max_tokens, - temperature=call.temperature, - ) - processor = MediaProcessor(hass, client) - client = await processor.add_streams(image_entities=call.image_entities, - duration=call.duration, - max_frames=call.max_frames, - target_width=call.target_width, - include_filename=call.include_filename, - expose_images=call.expose_images - ) - - response = await client.forward_request(call) + request = Request(hass, + message=call.message, + max_tokens=call.max_tokens, + temperature=call.temperature, + ) + processor = MediaProcessor(hass, request) + request = await processor.add_streams(image_entities=call.image_entities, + duration=call.duration, + max_frames=call.max_frames, + target_width=call.target_width, + include_filename=call.include_filename, + expose_images=call.expose_images + ) + + response = await request.call(call) await _remember(hass, call, start, response) return response @@ -321,12 +297,12 @@ def is_number(s): return True except ValueError: return False - + start = dt_util.now() call = ServiceCallData(data_call).get_service_call_data() sensor_entity = data_call.data.get("sensor_entity") _LOGGER.info(f"Sensor entity: {sensor_entity}") - + # get current value to determine data type state = hass.states.get(sensor_entity).state _LOGGER.info(f"Current state: {state}") @@ -338,24 +314,26 @@ def is_number(s): data_type = "number" else: if "options" in hass.states.get(sensor_entity).attributes: - data_type = "one of these options: " + ", ".join([f"'{option}'" for option in hass.states.get(sensor_entity).attributes["options"]]) + data_type = "one of these options: " + \ + ", ".join([f"'{option}'" for option in hass.states.get( + sensor_entity).attributes["options"]]) else: data_type = "string" message = f"Your job is to extract data from images. Return a {data_type} only. No additional text or other options allowed!. If unsure, choose the option that best matches. 
Follow these instructions: " + call.message _LOGGER.info(f"Message: {message}") - client = RequestHandler(hass, - message=message, - max_tokens=call.max_tokens, - temperature=call.temperature, - ) - processor = MediaProcessor(hass, client) - client = await processor.add_visual_data(image_entities=call.image_entities, - image_paths=call.image_paths, - target_width=call.target_width, - include_filename=call.include_filename - ) - response = await client.forward_request(call) + request = Request(hass, + message=message, + max_tokens=call.max_tokens, + temperature=call.temperature, + ) + processor = MediaProcessor(hass, request) + request = await processor.add_visual_data(image_entities=call.image_entities, + image_paths=call.image_paths, + target_width=call.target_width, + include_filename=call.include_filename + ) + response = await request.call(call) _LOGGER.info(f"Response: {response}") # udpate sensor in data_call.data.get("sensor_entity") await _update_sensor(hass, sensor_entity, response["response_text"]) diff --git a/custom_components/llmvision/const.py b/custom_components/llmvision/const.py index 09b21e4..b67815a 100644 --- a/custom_components/llmvision/const.py +++ b/custom_components/llmvision/const.py @@ -35,6 +35,7 @@ TEMPERATURE = 'temperature' INCLUDE_FILENAME = 'include_filename' EXPOSE_IMAGES = 'expose_images' +GENERATE_TITLE = 'generate_title' SENSOR_ENTITY = 'sensor_entity' # Error messages diff --git a/custom_components/llmvision/manifest.json b/custom_components/llmvision/manifest.json index e6074bb..dc298f5 100644 --- a/custom_components/llmvision/manifest.json +++ b/custom_components/llmvision/manifest.json @@ -2,10 +2,10 @@ "domain": "llmvision", "name": "LLM Vision", "codeowners": ["@valentinfrlch"], - "requirements": ["openai==1.54.3"], "config_flow": true, "documentation": "https://github.com/valentinfrlch/ha-llmvision", "iot_class": "cloud_polling", "issue_tracker": "https://github.com/valentinfrlch/ha-llmvision/issues", + "requirements": 
["openai==1.54.3"], "version": "1.3.2" } \ No newline at end of file diff --git a/custom_components/llmvision/providers.py b/custom_components/llmvision/providers.py index aad7dc6..1adafd4 100644 --- a/custom_components/llmvision/providers.py +++ b/custom_components/llmvision/providers.py @@ -1,7 +1,8 @@ from abc import ABC, abstractmethod -import openai +from openai import AsyncOpenAI from homeassistant.exceptions import ServiceValidationError from homeassistant.helpers.aiohttp_client import async_get_clientsession +from homeassistant.helpers.httpx_client import get_async_client import logging import asyncio import inspect @@ -88,7 +89,7 @@ def default_model(provider): return { }.get(provider, "gpt-4o-mini") # Default value -class RequestHandler: +class Request: def __init__(self, hass, message, max_tokens, temperature): self.session = async_get_clientsession(hass) self.hass = hass @@ -98,74 +99,82 @@ def __init__(self, hass, message, max_tokens, temperature): self.base64_images = [] self.filenames = [] - async def forward_request(self, call): + async def call(self, call): entry_id = call.provider provider = get_provider(self.hass, entry_id) _LOGGER.info(f"Provider from call: {provider}") model = call.model if call.model != "None" else default_model(provider) + gen_title_prompt = "Generate a title for this image. Follow the schema: ' seen'. Do not mention the time or where. Do not speculate." 
+ + config = self.hass.data.get(DOMAIN).get(entry_id) if provider == 'OpenAI': - api_key = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_OPENAI_API_KEY) + api_key = config.get(CONF_OPENAI_API_KEY) - request = OpenAI(self.session, model, self.max_tokens, self.temperature, + request = OpenAI(self.hass, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key) + if call.generate_title: + _LOGGER.info(f"Generating title with prompt: {gen_title_prompt}") + gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) elif provider == 'Anthropic': - api_key = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_ANTHROPIC_API_KEY) + api_key = config.get(CONF_ANTHROPIC_API_KEY) - request = Anthropic(self.session, model, self.max_tokens, self.temperature, + request = Anthropic(self.hass, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_ANTHROPIC) + if call.generate_title: + gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) elif provider == 'Google': - api_key = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_GOOGLE_API_KEY) + api_key = config.get(CONF_GOOGLE_API_KEY) - request = Google(self.session, model, self.max_tokens, self.temperature, + request = Google(self.hass, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GOOGLE) + if call.generate_title: + gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) elif provider == 'Groq': - api_key = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_GROQ_API_KEY) + api_key = config.get(CONF_GROQ_API_KEY) - request = Groq(self.session, model, self.max_tokens, self.temperature, + request = 
Groq(self.hass, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GROQ) + if call.generate_title: + gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) elif provider == 'LocalAI': - ip_address = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_LOCALAI_IP_ADDRESS) - port = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_LOCALAI_PORT) - https = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_LOCALAI_HTTPS, False) - - request = LocalAI(self.session, model, self.max_tokens, self.temperature, + ip_address = config.get(CONF_LOCALAI_IP_ADDRESS) + port = config.get(CONF_LOCALAI_PORT) + https = config.get(CONF_LOCALAI_HTTPS, False) + + request = LocalAI(self.hass, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames) response_text = await request.vision_request(endpoint=ENDPOINT_LOCALAI, ip_address=ip_address, port=port, https=https) + if call.generate_title: + gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) elif provider == 'Ollama': - ip_address = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_OLLAMA_IP_ADDRESS) - port = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_OLLAMA_PORT) - https = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_OLLAMA_HTTPS, False) - - request = Ollama(self.session, model, self.max_tokens, self.temperature, + ip_address = config.get(CONF_OLLAMA_IP_ADDRESS) + port = config.get(CONF_OLLAMA_PORT) + https = config.get(CONF_OLLAMA_HTTPS, False) + + request = Ollama(self.hass, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames) response_text = await request.vision_request(endpoint=ENDPOINT_OLLAMA, ip_address=ip_address, port=port, https=https) + if call.generate_title: + gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) elif 
provider == 'Custom OpenAI': - api_key = self.hass.data.get(DOMAIN).get( - entry_id).get(CONF_CUSTOM_OPENAI_API_KEY, "") - endpoint = self.hass.data.get(DOMAIN).get(entry_id).get( + api_key = config.get(CONF_CUSTOM_OPENAI_API_KEY, "") + endpoint = config.get( CONF_CUSTOM_OPENAI_ENDPOINT) + "/v1/chat/completions" - - request = OpenAI(self.session, model, self.max_tokens, self.temperature, + + request = OpenAI(self.hass, model, self.max_tokens, self.temperature, self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key, endpoint) + if call.generate_title: + gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) else: raise ServiceValidationError("invalid_provider") - return {"response_text": response_text} + + return {"title": gen_title.replace(".", "") if 'gen_title' in locals() else None, "response_text": response_text} def add_frame(self, base64_image, filename): self.base64_images.append(base64_image) @@ -194,8 +203,9 @@ async def _resolve_error(self, response, provider): class Provider(ABC): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): - self.session = session + def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): + self.hass = hass + self.session = async_get_clientsession(hass) self.model = model self.max_tokens = max_tokens self.temperature = temperature @@ -218,8 +228,10 @@ def validate_images(self): async def vision_request(self, **kwargs) -> str: self.validate_images() self.validate(**kwargs) + if "prompt" in kwargs: + _LOGGER.info(f"Prompt received: {kwargs.get('prompt')}") + self.message = kwargs.get("prompt") kwargs["data"] = self._prepare_vision_data() - _LOGGER.info(f"kwargs: {kwargs.items()}") return await self._make_request(**kwargs) async def text_request(self, **kwargs) -> str: @@ -269,7 +281,7 @@ async def _fetch(self, url, max_retries=2, retry_delay=1): retries += 1 await 
asyncio.sleep(retry_delay) _LOGGER.warning(f"Failed to fetch {url} after {max_retries} retries") - + async def _resolve_error(self, response, provider) -> str: """Translate response status to error message""" import json @@ -293,17 +305,19 @@ async def _resolve_error(self, response, provider) -> str: class OpenAI(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(session, model, max_tokens, temperature, + def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): + super().__init__(hass, model, max_tokens, temperature, message, base64_images, filenames) async def _make_request(self, **kwargs) -> str: - openai.api_key = kwargs.get("api_key") + client = AsyncOpenAI( + api_key=kwargs.get("api_key"), + http_client=get_async_client(self.hass), + ) + messages = kwargs.get("data") - if "endpoint" in kwargs: - openai.base_url = kwargs.get("endpoint") - response = openai.chat.completions.create( + response = await client.chat.completions.create( model=self.model, messages=messages, max_tokens=self.max_tokens, @@ -326,15 +340,15 @@ def _prepare_vision_data(self) -> list: def _prepare_text_data(self) -> list: return [{"role": "user", "content": [{"type": "text", "text": self.message}]}] - + def validate(self, **kwargs): if not kwargs.get("api_key"): raise ServiceValidationError(ERROR_OPENAI_NOT_CONFIGURED) - + class Anthropic(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(session, model, max_tokens, temperature, + def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): + super().__init__(hass, model, max_tokens, temperature, message, base64_images, filenames) def _generate_headers(self, api_key: str) -> dict: @@ -386,8 +400,8 @@ def validate(self, **kwargs): class Google(Provider): - def __init__(self, session, model, max_tokens, temperature, message, 
base64_images, filenames): - super().__init__(session, model, max_tokens, temperature, + def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): + super().__init__(hass, model, max_tokens, temperature, message, base64_images, filenames) def _generate_headers(self) -> dict: @@ -421,15 +435,15 @@ def _prepare_text_data(self) -> dict: "contents": [{"role": "user", "parts": [{"text": self.message + ":"}]}], "generationConfig": {"maxOutputTokens": self.max_tokens, "temperature": self.temperature} } - + def validate(self, **kwargs): if not kwargs.get("api_key"): raise ServiceValidationError(ERROR_GOOGLE_NOT_CONFIGURED) class Groq(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(session, model, max_tokens, temperature, + def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): + super().__init__(hass, model, max_tokens, temperature, message, base64_images, filenames) def _generate_headers(self, api_key: str) -> dict: @@ -475,7 +489,7 @@ def _prepare_text_data(self) -> dict: ], "model": self.model } - + def validate(self, **kwargs): if not kwargs.get("api_key"): raise ServiceValidationError(ERROR_GROQ_NOT_CONFIGURED) @@ -484,8 +498,8 @@ def validate(self, **kwargs): class LocalAI(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(session, model, max_tokens, temperature, + def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): + super().__init__(hass, model, max_tokens, temperature, message, base64_images, filenames) async def _make_request(self, **kwargs) -> str: @@ -523,15 +537,15 @@ def _prepare_text_data(self) -> dict: "max_tokens": self.max_tokens, "temperature": self.temperature } - + def validate(self, **kwargs): if not kwargs.get("ip_address") or not kwargs.get("port"): raise 
ServiceValidationError(ERROR_LOCALAI_NOT_CONFIGURED) class Ollama(Provider): - def __init__(self, session, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(session, model, max_tokens, temperature, + def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): + super().__init__(hass, model, max_tokens, temperature, message, base64_images, filenames) async def _make_request(self, **kwargs) -> str: @@ -541,7 +555,8 @@ async def _make_request(self, **kwargs) -> str: ip_address = kwargs.get("ip_address") port = kwargs.get("port") - _LOGGER.info(f"endpoint: {endpoint} https: {https} ip_address: {ip_address} port: {port}") + _LOGGER.info( + f"endpoint: {endpoint} https: {https} ip_address: {ip_address} port: {port}") headers = {} protocol = "https" if https else "http" @@ -572,4 +587,4 @@ def _prepare_text_data(self) -> dict: def validate(self, **kwargs): if not kwargs.get("ip_address") or not kwargs.get("port"): - raise ServiceValidationError(ERROR_OLLAMA_NOT_CONFIGURED) \ No newline at end of file + raise ServiceValidationError(ERROR_OLLAMA_NOT_CONFIGURED) diff --git a/custom_components/llmvision/services.yaml b/custom_components/llmvision/services.yaml index e6bfc64..be7fd6e 100644 --- a/custom_components/llmvision/services.yaml +++ b/custom_components/llmvision/services.yaml @@ -89,6 +89,13 @@ image_analyzer: min: 0.1 max: 1.0 step: 0.1 + generate_title: + name: Generate Title + required: false + description: Generate a title. (Used for notifications) + default: false + selector: + boolean: expose_images: name: Expose Images description: (Experimental) Expose analyzed frames after processing. This will save analyzed frames in /www/llmvision so they can be used for notifications. (Only works for entity input, include camera name should be enabled). Existing files will be overwritten. 
From b532660f7e09282cbf20d529a64065b57ee790ea Mon Sep 17 00:00:00 2001 From: valentinfrlch Date: Sun, 17 Nov 2024 21:29:06 +0100 Subject: [PATCH 5/9] (WIP): #92, #83, #64, #89 migrating to OpenAI SDK, adding Azure provider --- custom_components/llmvision/__init__.py | 23 +++- custom_components/llmvision/calendar.py | 4 +- custom_components/llmvision/config_flow.py | 95 ++++++++++++++-- custom_components/llmvision/const.py | 11 +- custom_components/llmvision/providers.py | 120 ++++++++++++++++----- 5 files changed, 200 insertions(+), 53 deletions(-) diff --git a/custom_components/llmvision/__init__.py b/custom_components/llmvision/__init__.py index c8329dc..28393e5 100644 --- a/custom_components/llmvision/__init__.py +++ b/custom_components/llmvision/__init__.py @@ -2,6 +2,9 @@ from .const import ( DOMAIN, CONF_OPENAI_API_KEY, + CONF_AZURE_API_KEY, + CONF_AZURE_ENDPOINT, + CONF_AZURE_VERSION, CONF_ANTHROPIC_API_KEY, CONF_GOOGLE_API_KEY, CONF_GROQ_API_KEY, @@ -34,13 +37,15 @@ SENSOR_ENTITY, ) from .calendar import SemanticIndex +from .providers import Request +from .media_handlers import MediaProcessor +import os from datetime import timedelta from homeassistant.util import dt as dt_util from homeassistant.config_entries import ConfigEntry -from .providers import Request -from .media_handlers import MediaProcessor from homeassistant.core import SupportsResponse from homeassistant.exceptions import ServiceValidationError +from functools import partial import logging _LOGGER = logging.getLogger(__name__) @@ -53,6 +58,9 @@ async def async_setup_entry(hass, entry): # Get all entries from config flow openai_api_key = entry.data.get(CONF_OPENAI_API_KEY) + azure_api_key = entry.data.get(CONF_AZURE_API_KEY) + azure_endpoint = entry.data.get(CONF_AZURE_ENDPOINT) + azure_version = entry.data.get(CONF_AZURE_VERSION) anthropic_api_key = entry.data.get(CONF_ANTHROPIC_API_KEY) google_api_key = entry.data.get(CONF_GOOGLE_API_KEY) groq_api_key = entry.data.get(CONF_GROQ_API_KEY) @@ 
-73,6 +81,9 @@ async def async_setup_entry(hass, entry): # Create a dictionary for the entry data entry_data = { CONF_OPENAI_API_KEY: openai_api_key, + CONF_AZURE_API_KEY: azure_api_key, + CONF_AZURE_ENDPOINT: azure_endpoint, + CONF_AZURE_VERSION: azure_version, CONF_ANTHROPIC_API_KEY: anthropic_api_key, CONF_GOOGLE_API_KEY: google_api_key, CONF_GROQ_API_KEY: groq_api_key, @@ -96,6 +107,8 @@ async def async_setup_entry(hass, entry): # check if the entry is the calendar entry (has entry rentention_time) if filtered_entry_data.get(CONF_RETENTION_TIME) is not None: + # make sure 'llmvision' directory exists + await hass.loop.run_in_executor(None, partial(os.makedirs, "/llmvision", exist_ok=True)) # forward the calendar entity to the platform await hass.config_entries.async_forward_entry_setups(entry, ["calendar"]) @@ -158,13 +171,13 @@ async def _remember(hass, call, start, response): else: camera_name = "Unknown" - camera_name = camera_name.replace("camera.", "").replace("image.", "") + camera_name = camera_name.replace("camera.", "").replace("image.", "").capitalize() await semantic_index.remember( start=start, end=dt_util.now() + timedelta(minutes=1), - label=title, - camera_name=camera_name, + label=title + " near " + camera_name if camera_name != "Unknown" else title, + camera_name=camera_name if camera_name != "Unknown" else "Image Input", summary=response["response_text"] ) diff --git a/custom_components/llmvision/calendar.py b/custom_components/llmvision/calendar.py index 9276cb8..70a2eaa 100644 --- a/custom_components/llmvision/calendar.py +++ b/custom_components/llmvision/calendar.py @@ -37,10 +37,10 @@ def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry): self._attr_supported_features = (CalendarEntityFeature.DELETE_EVENT) # Path to the JSON file where events are stored self._file_path = os.path.join( - self.hass.config.path("custom_components/llmvision"), "events.json" + self.hass.config.path("llmvision"), "events.json" ) 
self.hass.loop.create_task(self.async_update()) - + def _ensure_datetime(self, dt): """Ensure the input is a datetime.datetime object.""" if isinstance(dt, datetime.date) and not isinstance(dt, datetime.datetime): diff --git a/custom_components/llmvision/config_flow.py b/custom_components/llmvision/config_flow.py index abb4e68..ef3b6d3 100644 --- a/custom_components/llmvision/config_flow.py +++ b/custom_components/llmvision/config_flow.py @@ -1,11 +1,16 @@ +from openai import AsyncOpenAI, AsyncAzureOpenAI from homeassistant import config_entries from homeassistant.helpers.selector import selector from homeassistant.exceptions import ServiceValidationError from homeassistant.helpers.aiohttp_client import async_get_clientsession +from homeassistant.helpers.httpx_client import get_async_client import urllib.parse from .const import ( DOMAIN, CONF_OPENAI_API_KEY, + CONF_AZURE_API_KEY, + CONF_AZURE_ENDPOINT, + CONF_AZURE_VERSION, CONF_ANTHROPIC_API_KEY, CONF_GOOGLE_API_KEY, CONF_GROQ_API_KEY, @@ -36,12 +41,42 @@ async def _validate_api_key(self, api_key): _LOGGER.error("You need to provide a valid API key.") raise ServiceValidationError("empty_api_key") elif self.user_input["provider"] == "OpenAI": - header = {'Content-type': 'application/json', - 'Authorization': 'Bearer ' + api_key} - base_url = "api.openai.com" - endpoint = "/v1/models" - payload = {} - method = "GET" + # TODO: Implement OpenAI handshake with OpenAI SDK + client = AsyncOpenAI( + api_key=api_key, + http_client=get_async_client(self.hass), + ) + try: + await client.models.list() + return True + except Exception as e: + _LOGGER.error(f"Could not connect to OpenAI: {e}") + return False + elif self.user_input["provider"] == "Custom OpenAI": + client = AsyncOpenAI( + api_key=api_key, + http_client=get_async_client(self.hass), + endpoint=self.user_input[CONF_CUSTOM_OPENAI_ENDPOINT] + ) + try: + await client.models.list() + return True + except Exception as e: + _LOGGER.error(f"Could not connect to Custom 
OpenAI: {e}") + return False + elif self.user_input["provider"] == "Azure": + client = AsyncAzureOpenAI( + api_key=api_key, + api_version="2024-10-01-preview", + azure_endpoint="https://llmvision-test.openai.azure.com/", + http_client=get_async_client(self.hass), + ) + try: + await client.models.list() + return True + except Exception as e: + _LOGGER.error(f"Could not connect to Azure: {e}") + return False elif self.user_input["provider"] == "Anthropic": header = { 'x-api-key': api_key, @@ -136,6 +171,12 @@ async def openai(self): _LOGGER.error("Could not connect to OpenAI server.") raise ServiceValidationError("handshake_failed") + async def azure(self): + self._validate_provider() + if not await self._validate_api_key(self.user_input[CONF_AZURE_API_KEY]): + _LOGGER.error("Could not connect to Azure server.") + raise ServiceValidationError("handshake_failed") + async def custom_openai(self): self._validate_provider() try: @@ -196,6 +237,8 @@ def get_configured_providers(self): providers.append("OpenAI") if CONF_ANTHROPIC_API_KEY in self.hass.data[DOMAIN]: providers.append("Anthropic") + if CONF_AZURE_API_KEY in self.hass.data[DOMAIN]: + providers.append("Azure") if CONF_GOOGLE_API_KEY in self.hass.data[DOMAIN]: providers.append("Google") if CONF_LOCALAI_IP_ADDRESS in self.hass.data[DOMAIN] and CONF_LOCALAI_PORT in self.hass.data[DOMAIN]: @@ -215,14 +258,15 @@ class llmvisionConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): async def handle_provider(self, provider): provider_steps = { - "Event Calendar": self.async_step_semantic_index, - "OpenAI": self.async_step_openai, "Anthropic": self.async_step_anthropic, + "Azure": self.async_step_azure, + "Custom OpenAI": self.async_step_custom_openai, + "Event Calendar": self.async_step_semantic_index, "Google": self.async_step_google, "Groq": self.async_step_groq, - "Ollama": self.async_step_ollama, "LocalAI": self.async_step_localai, - "Custom OpenAI": self.async_step_custom_openai, + "Ollama": self.async_step_ollama, 
+ "OpenAI": self.async_step_openai, } step_method = provider_steps.get(provider) @@ -236,7 +280,7 @@ async def async_step_user(self, user_input=None): data_schema = vol.Schema({ vol.Required("provider", default="Event Calendar"): selector({ "select": { - "options": ["Event Calendar", "OpenAI", "Anthropic", "Google", "Groq", "Ollama", "LocalAI", "Custom OpenAI"], + "options": ["Anthropic", "Azure", "Google", "Groq", "LocalAI", "Ollama", "OpenAI", "Custom OpenAI", "Event Calendar"], "mode": "dropdown", "sort": False, "custom_value": False @@ -338,6 +382,35 @@ async def async_step_openai(self, user_input=None): data_schema=data_schema, ) + async def async_step_azure(self, user_input=None): + data_schema = vol.Schema({ + vol.Required(CONF_AZURE_API_KEY): str, + vol.Required(CONF_AZURE_ENDPOINT, default="https://domain.openai.azure.com/"): str, + vol.Required(CONF_AZURE_VERSION, default="2024-10-01-preview"): str, + }) + + if user_input is not None: + # save provider to user_input + user_input["provider"] = self.init_info["provider"] + validator = Validator(self.hass, user_input) + try: + await validator.azure() + # add the mode to user_input + user_input["provider"] = self.init_info["provider"] + return self.async_create_entry(title="Azure", data=user_input) + except ServiceValidationError as e: + _LOGGER.error(f"Validation failed: {e}") + return self.async_show_form( + step_id="azure", + data_schema=data_schema, + errors={"base": "handshake_failed"} + ) + + return self.async_show_form( + step_id="azure", + data_schema=data_schema, + ) + async def async_step_anthropic(self, user_input=None): data_schema = vol.Schema({ vol.Required(CONF_ANTHROPIC_API_KEY): str, diff --git a/custom_components/llmvision/const.py b/custom_components/llmvision/const.py index b67815a..49403d4 100644 --- a/custom_components/llmvision/const.py +++ b/custom_components/llmvision/const.py @@ -5,6 +5,9 @@ # Configuration values from setup CONF_OPENAI_API_KEY = 'openai_api_key' +CONF_AZURE_API_KEY 
= 'azure_api_key' +CONF_AZURE_ENDPOINT = 'azure_endpoint' +CONF_AZURE_VERSION = 'azure_version' CONF_ANTHROPIC_API_KEY = 'anthropic_api_key' CONF_GOOGLE_API_KEY = 'google_api_key' CONF_GROQ_API_KEY = 'groq_api_key' @@ -39,14 +42,8 @@ SENSOR_ENTITY = 'sensor_entity' # Error messages -ERROR_OPENAI_NOT_CONFIGURED = "OpenAI is not configured" -ERROR_ANTHROPIC_NOT_CONFIGURED = "Anthropic is not configured" -ERROR_GOOGLE_NOT_CONFIGURED = "Google is not configured" -ERROR_GROQ_NOT_CONFIGURED = "Groq is not configured" +ERROR_NOT_CONFIGURED = "{provider} is not configured" ERROR_GROQ_MULTIPLE_IMAGES = "Groq does not support videos or streams" -ERROR_LOCALAI_NOT_CONFIGURED = "LocalAI is not configured" -ERROR_OLLAMA_NOT_CONFIGURED = "Ollama is not configured" -ERROR_CUSTOM_OPENAI_NOT_CONFIGURED = "Custom OpenAI provider is not configured" ERROR_NO_IMAGE_INPUT = "No image input provided" ERROR_HANDSHAKE_FAILED = "Connection could not be established" diff --git a/custom_components/llmvision/providers.py b/custom_components/llmvision/providers.py index 1adafd4..b213d8a 100644 --- a/custom_components/llmvision/providers.py +++ b/custom_components/llmvision/providers.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from openai import AsyncOpenAI +from openai import AsyncOpenAI, AsyncAzureOpenAI from homeassistant.exceptions import ServiceValidationError from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.httpx_client import get_async_client @@ -9,6 +9,9 @@ from .const import ( DOMAIN, CONF_OPENAI_API_KEY, + CONF_AZURE_API_KEY, + CONF_AZURE_ENDPOINT, + CONF_AZURE_VERSION, CONF_ANTHROPIC_API_KEY, CONF_GOOGLE_API_KEY, CONF_GROQ_API_KEY, @@ -26,14 +29,9 @@ ENDPOINT_LOCALAI, ENDPOINT_OLLAMA, ENDPOINT_GROQ, - ERROR_OPENAI_NOT_CONFIGURED, - ERROR_ANTHROPIC_NOT_CONFIGURED, - ERROR_GOOGLE_NOT_CONFIGURED, - ERROR_GROQ_NOT_CONFIGURED, + ERROR_NOT_CONFIGURED, ERROR_GROQ_MULTIPLE_IMAGES, - ERROR_LOCALAI_NOT_CONFIGURED, - 
ERROR_OLLAMA_NOT_CONFIGURED, - ERROR_NO_IMAGE_INPUT + ERROR_NO_IMAGE_INPUT, ) _LOGGER = logging.getLogger(__name__) @@ -62,6 +60,8 @@ def get_provider(hass, provider_uid): if CONF_OPENAI_API_KEY in entry_data: return "OpenAI" + elif CONF_AZURE_API_KEY in entry_data: + return "Azure" elif CONF_ANTHROPIC_API_KEY in entry_data: return "Anthropic" elif CONF_GOOGLE_API_KEY in entry_data: @@ -104,9 +104,9 @@ async def call(self, call): provider = get_provider(self.hass, entry_id) _LOGGER.info(f"Provider from call: {provider}") model = call.model if call.model != "None" else default_model(provider) - gen_title_prompt = "Generate a title for this image. Follow the schema: ' seen'. Do not mention the time or where. Do not speculate." + gen_title_prompt = "Your job is to generate a title in the form ' seen' for texts. Do not mention the time, do not speculate. Generate a title for this text: {response}" - config = self.hass.data.get(DOMAIN).get(entry_id) + config = self.hass.data.get(DOMAIN).get(entry_id) if provider == 'OpenAI': api_key = config.get(CONF_OPENAI_API_KEY) @@ -115,8 +115,17 @@ async def call(self, call): self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key) if call.generate_title: - _LOGGER.info(f"Generating title with prompt: {gen_title_prompt}") - gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) + gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) + if provider == 'Azure': + api_key = config.get(CONF_AZURE_API_KEY) + endpoint = config.get(CONF_AZURE_ENDPOINT) + version = config.get(CONF_AZURE_VERSION) + + request = AzureOpenAI(self.hass, model, self.max_tokens, self.temperature, + self.message, self.base64_images, self.filenames) + response_text = await request.vision_request(api_key=api_key, endpoint=endpoint, version=version) + if call.generate_title: + gen_title = await 
request.text_request(api_key=api_key, endpoint=endpoint, version=version, prompt=gen_title_prompt.format(response=response_text)) elif provider == 'Anthropic': api_key = config.get(CONF_ANTHROPIC_API_KEY) @@ -124,7 +133,7 @@ async def call(self, call): self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_ANTHROPIC) if call.generate_title: - gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) + gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) elif provider == 'Google': api_key = config.get(CONF_GOOGLE_API_KEY) @@ -132,7 +141,7 @@ async def call(self, call): self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GOOGLE) if call.generate_title: - gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) + gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) elif provider == 'Groq': api_key = config.get(CONF_GROQ_API_KEY) @@ -140,7 +149,7 @@ async def call(self, call): self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GROQ) if call.generate_title: - gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) + gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) elif provider == 'LocalAI': ip_address = config.get(CONF_LOCALAI_IP_ADDRESS) port = config.get(CONF_LOCALAI_PORT) @@ -150,7 +159,7 @@ async def call(self, call): self.message, self.base64_images, self.filenames) response_text = await request.vision_request(endpoint=ENDPOINT_LOCALAI, ip_address=ip_address, port=port, https=https) if call.generate_title: - gen_title = await request.vision_request(api_key=api_key, 
prompt=gen_title_prompt) + gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) elif provider == 'Ollama': ip_address = config.get(CONF_OLLAMA_IP_ADDRESS) port = config.get(CONF_OLLAMA_PORT) @@ -160,7 +169,7 @@ async def call(self, call): self.message, self.base64_images, self.filenames) response_text = await request.vision_request(endpoint=ENDPOINT_OLLAMA, ip_address=ip_address, port=port, https=https) if call.generate_title: - gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) + gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) elif provider == 'Custom OpenAI': api_key = config.get(CONF_CUSTOM_OPENAI_API_KEY, "") endpoint = config.get( @@ -170,11 +179,11 @@ async def call(self, call): self.message, self.base64_images, self.filenames) response_text = await request.vision_request(api_key, endpoint) if call.generate_title: - gen_title = await request.vision_request(api_key=api_key, prompt=gen_title_prompt) + gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) else: raise ServiceValidationError("invalid_provider") - return {"title": gen_title.replace(".", "") if 'gen_title' in locals() else None, "response_text": response_text} + return {"title": gen_title.replace(".", "").replace("'", "") if 'gen_title' in locals() else None, "response_text": response_text} def add_frame(self, base64_image, filename): self.base64_images.append(base64_image) @@ -229,14 +238,18 @@ async def vision_request(self, **kwargs) -> str: self.validate_images() self.validate(**kwargs) if "prompt" in kwargs: - _LOGGER.info(f"Prompt received: {kwargs.get('prompt')}") self.message = kwargs.get("prompt") kwargs["data"] = self._prepare_vision_data() return await self._make_request(**kwargs) async def text_request(self, **kwargs) -> str: + _LOGGER.info(f"Request received: 
{kwargs.get('prompt')}") self.validate_images() self.validate(**kwargs) + if "prompt" in kwargs: + self.message = kwargs.get("prompt") + self.temperature = 0.1 + self.max_tokens = 3 kwargs["data"] = self._prepare_text_data() return await self._make_request(**kwargs) @@ -313,8 +326,53 @@ async def _make_request(self, **kwargs) -> str: client = AsyncOpenAI( api_key=kwargs.get("api_key"), http_client=get_async_client(self.hass), - ) - + ) + + messages = kwargs.get("data") + + response = await client.chat.completions.create( + model=self.model, + messages=messages, + max_tokens=self.max_tokens, + temperature=self.temperature, + ) + + response_text = response.choices[0].message.content + return response_text + + def _prepare_vision_data(self) -> list: + messages = [{"role": "user", "content": []}] + for image, filename in zip(self.base64_images, self.filenames): + tag = ("Image " + str(self.base64_images.index(image) + 1) + ) if filename == "" else filename + messages[0]["content"].append({"type": "text", "text": tag + ":"}) + messages[0]["content"].append({"type": "image_url", "image_url": { + "url": f"data:image/jpeg;base64,{image}"}}) + messages[0]["content"].append({"type": "text", "text": self.message}) + return messages + + def _prepare_text_data(self) -> list: + return [{"role": "user", "content": [{"type": "text", "text": self.message}]}] + + def validate(self, **kwargs): + if not kwargs.get("api_key"): + raise ServiceValidationError( + ERROR_NOT_CONFIGURED.format(provider="OpenAI")) + + +class AzureOpenAI(Provider): + def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): + super().__init__(hass, model, max_tokens, temperature, + message, base64_images, filenames) + + async def _make_request(self, **kwargs) -> str: + client = AsyncAzureOpenAI( + api_key=kwargs.get("api_key"), + azure_endpoint=kwargs.get("endpoint"), + api_version=kwargs.get("version"), + http_client=get_async_client(self.hass), + ) + messages = 
kwargs.get("data") response = await client.chat.completions.create( @@ -343,7 +401,8 @@ def _prepare_text_data(self) -> list: def validate(self, **kwargs): if not kwargs.get("api_key"): - raise ServiceValidationError(ERROR_OPENAI_NOT_CONFIGURED) + raise ServiceValidationError( + ERROR_NOT_CONFIGURED.format(provider="Azure")) class Anthropic(Provider): @@ -396,7 +455,8 @@ def _prepare_text_data(self) -> dict: def validate(self, **kwargs): if not kwargs.get("api_key"): - raise ServiceValidationError(ERROR_ANTHROPIC_NOT_CONFIGURED) + raise ServiceValidationError( + ERROR_NOT_CONFIGURED.format(provider="Anthropic")) class Google(Provider): @@ -438,7 +498,8 @@ def _prepare_text_data(self) -> dict: def validate(self, **kwargs): if not kwargs.get("api_key"): - raise ServiceValidationError(ERROR_GOOGLE_NOT_CONFIGURED) + raise ServiceValidationError( + ERROR_NOT_CONFIGURED.format(provider="Google")) class Groq(Provider): @@ -492,7 +553,8 @@ def _prepare_text_data(self) -> dict: def validate(self, **kwargs): if not kwargs.get("api_key"): - raise ServiceValidationError(ERROR_GROQ_NOT_CONFIGURED) + raise ServiceValidationError( + ERROR_NOT_CONFIGURED.format(provider="Groq")) if len(kwargs.get("base64_images")) > 1: raise ServiceValidationError(ERROR_GROQ_MULTIPLE_IMAGES) @@ -540,7 +602,8 @@ def _prepare_text_data(self) -> dict: def validate(self, **kwargs): if not kwargs.get("ip_address") or not kwargs.get("port"): - raise ServiceValidationError(ERROR_LOCALAI_NOT_CONFIGURED) + raise ServiceValidationError( + ERROR_NOT_CONFIGURED.format(provider="LocalAI")) class Ollama(Provider): @@ -587,4 +650,5 @@ def _prepare_text_data(self) -> dict: def validate(self, **kwargs): if not kwargs.get("ip_address") or not kwargs.get("port"): - raise ServiceValidationError(ERROR_OLLAMA_NOT_CONFIGURED) + raise ServiceValidationError( + ERROR_NOT_CONFIGURED.format(provider="OpenAI")) From 06104660161b4d649de6ef73fb5479e008897471 Mon Sep 17 00:00:00 2001 From: valentinfrlch Date: Mon, 2 Dec 2024 
21:56:26 +0100 Subject: [PATCH 6/9] (WIP): Rewrote providers from scratch, added support for Azure, reverted back to http requests, more robust validation; #64, #92 --- benchmark_visualization/benchmark_data.csv | 9 +- .../model_benchmark_visualizer.py | 124 ++- .../open_source_benchmark_visualization.jpg | Bin 0 -> 109255 bytes custom_components/llmvision/__init__.py | 16 +- custom_components/llmvision/config_flow.py | 307 ++----- custom_components/llmvision/const.py | 6 +- custom_components/llmvision/manifest.json | 3 +- custom_components/llmvision/media_handlers.py | 28 +- custom_components/llmvision/providers.py | 757 ++++++++++-------- 9 files changed, 631 insertions(+), 619 deletions(-) create mode 100644 benchmark_visualization/open_source_benchmark_visualization.jpg diff --git a/benchmark_visualization/benchmark_data.csv b/benchmark_visualization/benchmark_data.csv index 08a008f..b71a35f 100644 --- a/benchmark_visualization/benchmark_data.csv +++ b/benchmark_visualization/benchmark_data.csv @@ -1,4 +1,6 @@ Model,Size,Date,Overall,Art & Design,Business,Science,Health & Medicine,Human. 
& Social Sci.,Tech & Eng.,Cost +Llama 3.2 90B, 90B, 2024-09-25, 60.3, -, -, -, -, -, -, 0.00000001 +Llama 3.2 11B, 11B, 2024-09-25, 50.7, -, -, -, -, -, -, 0.00000001 GPT-4o,-,2024-05-27,69.1,-,-,-,-,-,-,5 GPT-4o mini,-,2024-05-27,59.4,-,-,-,-,-,-,0.15 Gemini 1.5 Pro,-,2024-05-31,65.8,-,-,-,-,-,-,3.5 @@ -33,9 +35,10 @@ Yi-VL-6B*,6B,2024-01-23,39.1,52.5,30.7,31.3,38,53.3,35.7, InternVL-Chat-V1.1*,-,2024-01-27,39.1,56.7,34.7,31.3,39.3,57.5,27.1, Bunny-3B*,3B,2024-02-13,38.2,49.2,30.7,30.7,40.7,45,37.1, SVIT*,-,2023-12-26,38,52.5,27.3,28,42,51.7,33.8, -MiniCPM-V*,-,2024-02-07,37.2,55.8,33.3,28,32.7,58.3,27.1, -MiniCPM-V-2*,-,2024-04-16,37.1,63.3,28.7,30,30,56.7,27.1, -LLaVA-1.5-13B,13B,2023-11-27,36.4,51.7,22.7,29.3,38.7,53.3,31.4, +MiniCPM-V*,-,2024-02-07,37.2,55.8,33.3,28,32.7,58.3,27.1,0.00000001 +MiniCPM-V-2*,-,2024-04-16,37.1,63.3,28.7,30,30,56.7,27.1,0.00000001 +MiniCPM-V-2.6,-,2024-04-16,49.8,63.3,28.7,30,30,56.7,27.1,0.00000001 +LLaVA-1.5-13B,13B,2023-11-27,36.4,51.7,22.7,29.3,38.7,53.3,31.4,0.00000001 Emu2-Chat*,-,2023-12-24,36.3,55,30,28.7,28.7,46.7,35.2, Qwen-VL-7B-Chat,-,2023-11-27,35.9,51.7,29.3,29.3,33.3,45,32.9, InstructBLIP-T5-XXL,-,2023-11-27,35.7,44.2,24,30.7,35.3,49.2,35.2, diff --git a/benchmark_visualization/model_benchmark_visualizer.py b/benchmark_visualization/model_benchmark_visualizer.py index 7eb7593..724c3e7 100644 --- a/benchmark_visualization/model_benchmark_visualizer.py +++ b/benchmark_visualization/model_benchmark_visualizer.py @@ -24,6 +24,8 @@ def category_name(model_name): return 'Anthropic Claude 3' elif "Gemini 1.5" in model_name: return 'Google Gemini 1.5' + elif "Llama 3.2" in model_name: + return 'Meta Llama 3.2' return 'Other' @@ -47,7 +49,7 @@ def categorize_model(model_name): # Set order for legend category_order = ['GPT-4', 'Claude 3', - 'Claude 3.5', 'Gemini 1.5'] # Add 'Gemini 1.5' + 'Claude 3.5', 'Gemini 1.5'] df['Category'] = pd.Categorical( df['Category'], categories=category_order, ordered=True) df = 
df.sort_values('Category') @@ -63,16 +65,20 @@ def categorize_model(model_name): x = group_df['Cost'].astype(float) y = group_df['Overall'].astype(float) - # Fit a polynomial - z = np.polyfit(x, y, 2) - p = np.poly1d(z) - - x_poly = np.linspace(x.min(), x.max(), 100) - y_poly = p(x_poly) - - # Plot the poly line - fig.add_trace(go.Scatter(x=x_poly, y=y_poly, mode='lines', - name=f"{category_name(category)}", line=dict(color=colors[category], width=6, dash='dash'), opacity=0.5)) + try: + # Fit a polynomial + z = np.polyfit(x, y, 2) + p = np.poly1d(z) + + x_poly = np.linspace(x.min(), x.max(), 100) + y_poly = p(x_poly) + + # Plot the poly line + fig.add_trace(go.Scatter(x=x_poly, y=y_poly, mode='lines', + name=f"{category_name(category)}", line=dict(color=colors[category], width=6, dash='dash'), opacity=0.5)) + except np.linalg.LinAlgError: + print(f"LinAlgError: SVD did not converge for category {category}") + continue # plot the actual datapoints for index, model in df.iterrows(): @@ -102,7 +108,101 @@ def categorize_model(model_name): fig.write_image("benchmark_visualization/benchmark_visualization.jpg", width=1920, height=1080, scale=1) + # Create a second visualization for open source models + fig_open_source = go.Figure() + + def categorize_open_source_model(model_name): + """Categories open source models based on name""" + if "Llama" in model_name: + return 'Llama' + elif "LLaVA" in model_name: + return 'LLaVA' + elif "MiniCPM" in model_name: + return 'MiniCPM' + return 'Other' + + # Categorize each open source model in the DataFrame + df['OpenSourceCategory'] = df['Model'].apply(categorize_open_source_model) + + # Set order for legend + open_source_category_order = ['Llama', 'LLaVA', 'MiniCPM'] + df['OpenSourceCategory'] = pd.Categorical( + df['OpenSourceCategory'], categories=open_source_category_order, ordered=True) + df = df.sort_values('OpenSourceCategory') + + # Set colors for different open source models + open_source_colors = {'Llama': '#0081fb', + 
'LLaVA': '#ff7f0e', 'MiniCPM': '#2ca02c', 'Other': 'gray'} + + # Convert 'Size' column to float + def convert_size_to_float(size_str): + """Convert model size string to float""" + size_str = size_str.strip() # Remove leading/trailing spaces + if size_str == "-": + return 0 + if size_str.endswith('B'): + return float(size_str[:-1]) * 1e9 + elif size_str.endswith('M'): + return float(size_str[:-1]) * 1e6 + return float(size_str) + + df['Size'] = df['Size'].apply(convert_size_to_float) + + for category, group_df in df.groupby('OpenSourceCategory'): + if category not in ['Llama', 'LLaVA', 'MiniCPM']: + continue + + x = group_df['Size'].astype(float) + y = group_df['Overall'].astype(float) + + if len(x) == 0 or len(y) == 0: + continue + + try: + # Fit a polynomial + z = np.polyfit(x, y, 2) + p = np.poly1d(z) + + x_poly = np.linspace(x.min(), x.max(), 100) + y_poly = p(x_poly) + + # Plot the poly line + fig_open_source.add_trace(go.Scatter(x=x_poly, y=y_poly, mode='lines', + name=f"{category_name(category)}", line=dict(color=open_source_colors[category], width=6, dash='dash'), opacity=0.5)) + except np.linalg.LinAlgError: + print(f"LinAlgError: SVD did not converge for category {category}") + continue + + # plot the actual datapoints + for index, model in df.iterrows(): + try: + fig_open_source.add_trace(go.Scatter(x=[model['Size']], y=[model['Overall']], + mode='markers', # Removed 'text' from mode + name=model['Model'], marker=dict(size=20, color=open_source_colors[model['OpenSourceCategory']]))) + + except Exception as e: + continue + + # Add model name + fig_open_source.add_annotation(x=model['Size'], y=model['Overall'], + text=model['Model'], + showarrow=False, + yshift=-35) + + fig_open_source.update_layout(title={'text': 'Performance vs Model Size of Open Source Models in LLM Vision', + 'font': {'size': 50}}, + xaxis_title='Model Size (Parameters)', yaxis_title='MMMU Score Average', + paper_bgcolor='#0d1117', plot_bgcolor='#161b22', + font=dict(color='white', 
family='Product Sans', size=25), + xaxis=dict(color='white', linecolor='grey', + showgrid=False, zeroline=False), + yaxis=dict(color='white', linecolor='grey', + showgrid=False, zeroline=False)) + # Save the plot as an image + fig_open_source.write_image("benchmark_visualization/open_source_benchmark_visualization.jpg", + width=1920, height=1080, scale=1) + if __name__ == "__main__": df = read_benchmark_data() - create_benchmark_visualization(df) + create_benchmark_visualization(df) \ No newline at end of file diff --git a/benchmark_visualization/open_source_benchmark_visualization.jpg b/benchmark_visualization/open_source_benchmark_visualization.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3bd5847667bdbc5608f7a7592ea13da84ca47bd2 GIT binary patch literal 109255 zcmbrlcT|(v_b-fN8&On5M5-fI6M-)!baWU>fCw0p5IQ4OLX+NQ)DbBX1f@3#Jt08? z0g_Ng>0L+yk`Q|Dy_d`U?(eSm-gmwKyzA~~t*4*oe9k%hIcJ}}Kj-x4=@`dNLp=jM zj&tWYIL@7I9H%S}ZI1K5{44+N&!5SKOBeo?UoTy{cf-~+Kp?RSFc~a za)pbV>-vqGH@Rn6|H`OSYna_--h=P&$vmhk4)D_76%{{NKIe>k{*y`XaW zuk+_bIey_jcb@y)X%mOYzc4SJI}?uoCBI&{cBdR42HGA%mj47v&izz7o;qBv}P{F5SY9H`AxALwy z$dAac`sSOWlC};EY3`etRM^)qX%P7K6G=i!4-7}V|D6B5kv6sF=-hOcq0oO}Isd-{ z`tJ;XJ$tIeeU9UobLTH`oWFG8GROIUL7(G3!*l=QEjg`A(C0kfuM^7GMCEh8UB0dG zN8k85ub86gDU;*c*{i>BpXcUy!ZGuOW4Y$9|5aEH4iCKGxQSiR>q_Kk<$I0*Ep4|F zLqoFZG}wsUg$IiTWSapFu(1@;+^-Q+^huXfj$5V?i-$<2f%ip2`SWfI4V;j;PVn^VM&0ic}{>}9Ye_>XRkoiJEWX^-u z%6dz6grlo8NS9f~DUpR6!^eR0sQ`a4kQbX#U*-;3`~XRFHOn*zuV^$q0Mn~x zbxkU}b&$%5uBqEYLRnCy1(X|}Fz!Gxv+&$XG##{B5HG;K(HZK42qu-;AHA4@+x>vt z{&G+Mw`^?Cd`WH??dcX;WfDd1^&zy;br!HVntgn#vT@-{*Cqmb{07~_fcDV6X3Hy5 znbGcxNwS@qNCS1e))@o{ir(yQC zgATH2Od>*NxP7uZ8W$bFaR7tC&EUEQ1{Xf{sn4gRYCv(Lo^QqW(_$rmk93N56VoRiXlN&oj3qx&kK695(?! z>b5PZl-CsZGOA-)P5hk}wIQZ2p9P@_&4M>Mhc81*AubJ8eX%fo61NFEkBRmwAh z^YZ1TG&RR?9P1$9I=Tqks3-&BM@#Dyt!58gSMT5z6WiUesCoeFSo;BY`x5-!DaU)b z-EAK*vB>7=%2^%h{diiMLiCM|<*H*0ECv8Sw$Vr1JyP=BaR(Z?3Ci=_b{e$WMaTw! 
zxsi6$O6oR~i$N!Ctwi^DE@Uo`=KaLRR-|JYJAkOvoQrZl&i(#xY5%AE^2dw#bHk8D zHa17RaV_$hIp98?|ID2DD z*Vfk9Gy6i#>-@hia@f6ezXzn3Rt>!w+i~@*W_692=z@9D2Mv#t0%k(;ETf{%^dc%M z23zP_IxO!Nf8!-fYTz3G&!fN;{SFDMcM>KcpTD)#QekZgjMFl7D? zRv<1ff!BN`$acqnD8!2_`=jxni)VG>Ww#|d;kGY+Vioa8Ek{oyrE|Zw@E%~p*w*X8 z#;aGmpFHblGd!st^gf;FXpVmyRnQAu^K9@q{OjPHlET|7@ElLby} zEv6k(7`{k9UE_L5Tv;x*3JJGz!>nLkCBk?V{A`gVgiuD2?hrKWio@8sNrl{oO2mwA5^HtcU#gn9AoPKVm@WVVVfmGpHo zcr~7-h+GmmYk+{U;WO4U9!louH zziM#dTxdgVSKD+*XQT#i%U` zPP#UUd9++?6>?WUSjpKx21C)qe z5E0py!K7n$Z5}D;Mi15{%F*g9XUL*M&@+KyFqu$pRU?wJaqOBA^U?~yD~b6Kui~GW zy=|kb__-gdw|bxaNA#Y)cqCHO@-_d6CBC;>|U37iFuCy9XLaz*Aueoe= z?7URx6+tl`6nIS~oee7Ly6l*7f! z@17b>SfI*Y&Jp&bK6&`i_P4!6!(<#(gh?3Z?p!!g$O;_d6{@>-_*b0HSWJCi(cMT1 zRI5el%b$7Zsvz&Q@lwk|trH@l)EtVoNnn))6|a8y^m*`&Sd#LKUSYM|MuP93G%itp zVq{gQo98Yk6RTJp7_cwWhH|f8Oq_+>j*nnri|pa@?Y-i(1**`MohVwQqoS1^SfOQQ_Qq%!lOj- z7m2MGqT*)V>&ukps04P%=H}BN^TxgIiALod!x?mY{+*5;hFaC=?+a!H>giD(Awl^L z!6Msk$Fy*`E<$%M_CKjn6pM{_e=cR`qGVlUFL=e9NYDgzXHGec1O*XbPa=%%`4qLS zTT+7ch4O|Eisg?w>FO;NhkGBXSFBd@-Kpst`-!jm_6epvqF-tD6R(`k2k@x^SIK^( z%PH$D5G%eZ>;9}enOC=VxwPH27t}B(b$dWluy?Gm=6lMam%nmgEgvw9A+6WmMm->S zDOfW`oxqZdRf`Pv^D(!-7np~VUZ0TpxC$9c&oV{x8PGcw<9A_OEH4h4_~4J4zSDDpJ>0X9gwe zLuO&Hzpa-2C!mIMonp3bFqGT4qkEKKUWq}^a#iW%;i;_kbjyRFX&3CuL1EzTHs=VH zTq0HNWZK>8ayeZv>xIzp)0exB6->$MXT(j}v|JBOu6|~H#prN3_lE?$PeU4B*Ct7L z&}SdRKiI)2tB3F%yf zms9YRqpB)G#8QkOQ9$l>M7BJQL7%Kis<+g4LgiVaAudoRmdL}Gez8vn5>9e_;!mW;YmN^jRSfvF2%7Ywm*9d|Fpd{-P;6f3iWTKYOz z0Wp@lwi^R#+?M648h!A*eE6O;Wm#`%(s7-lsB1nF6E%ay26<-bxbQlO>Z>c1hTQ+> z{Z&;^ul05;ClMk%v~KFKS0{~6q>;p;i%%9}^G)?Ul&zo)1LL+&SfTkJ~_F$>GGYvA8^A3 z9g5PBe+Y5ZwE)hJeb#$eNzm0bQxCvkK?1XqGer-gh?#`T)^mF^OEj`A?OX=rbN`-k)mfUO~tDL8?l(IXUytuZ0z%0hzB@Mb6iL<8#=SBxR z=EH)astMLjwzjr-9hOr<4oE_^^vdzj;y)s)rO9Bo$eM3YFgKs>Fqa{Ke>&p>S7y_e z;XZ#1ief~8hz+q6MOG2*m{?T5 zBR98t8+oNS{Y~ck%NE;*Z#;!mGW*i}d;bn`Gpbq?1}>C*snDI;Lf($!wKI~CY5Tlc z1UE`=o|&7njmb<8y(h>P`rvEJFe4L;l6WqpRy@UdgUx~wtjzV6Xyq>dtJt^Sv+V%Qx!>e+*yjFa*3F!Bv=+4ej_#cip(8DFjEol~WE0Y(Xg5HY#HyKh9?Z 
zivftHt?ojMRp0up`(tXH|8~=)anhDn*q_aVO)<%qsGnHpJT^iY@~oWD4&>D=&cKiY zMtvcI(Uk!XL{amx@mKST{h)-_L^IVDpQSjR7zqSo=e{7-yvE6sKcj#BO4JXGg-^Sn zQHZ-V!kzE77Y?x8CkPar>{Dgd58mPuiFZ2az5qf@)LaH}Z`ZXlcD>ZmzD-f;S*hA6^vjKk&c_Jh=|?~Q8IYlA z_UNGDtnfTvp&{iRP@vsKwQ>U4R&!|dn@5Gw*YwnsM-&=mNJ}9c*=+?3#1Y_&eHmof ztBhR_<w zQW`9krn?sK|k0s#iY;M-2!+r&J32&K%dp_HX<;AmoJI?+3+;%S(kR)Oyl_A{x{e7 zosQ0v03NK;F=$MzQjC*ANSz$$@|D8aKQ>t2X) z%%;qtByMSVf+{plg?R?pXq6;p@O0TnW*3qK$~-PUMXfBjfYdesn%!siQxUfQSE^CA zc&LhBY>e9c_9ugiHg{y#>kux=}rt-PC9^KwJ2md$xf`6E+* ztY7r$tqIG;=C?d79021}e%JbVxY0d9te5}&CZV4fQ zU^qIoijA;d&~dXoa~RD6L`4DE+^C%tSE*|u2959RFb5ugPpbM)j2m!e?VJHZbbLsQ z&q?U=k_U-_ePwkPq9=-DR%eQf#I^b#|3ZDT_UP>|u~9wZ&(T)5e^4ki7i`+|f(^Zp z5eu^^I8rVE;ijy&u^lL{%B}RyMTnhV1!0aUrmN~AoZ`&V2GyPlyz|J6rKr<=@_W4E z{|43{D?gg;)-u*RU!w8nOCW}lkVo^D_#UQo058n*9|#T4^Z!EbBPl2RXv&FBi;Ag@ z)6t#wMHH_TNynmX+U0(ixqJL(J;}~vZ{UfRv`5b(OjuZ0(Un?|4i+<~9|Ma0dyQ&9 zIS?P*EHkhn9fWlw%2E#}N$~aXn`(iUEffQ-UGmZ4gVo=aK}}o!!1mj& zux``2yMrso)!TzbDSo4Vu#0L4miilL{}eO0b3`{*fU4IA) zfFsz#M>_4+HI~<_bz4%LZF5E}ic(iTLg~!=5#ACLvDfmG-}`MNR{aQuX|+6o`Cdpr zIAZ;wO2L@2_I9sBt{_ayVpA&2CsXCf^>T$_fGwW8>VyvGp4dY1SjSiq^k? 
zP7OC3T$d^PiS17lOCP$vSq;usmg#&%qysH@tV<}$S#dTgN!0r;fU&4*0j(ix5IbAw z6_ao`TlI~VyM=}yPF{!gR4KMwod*LE`smr7&VaLPH$ck9&Pyc?Dj@KCx3U|xP%FPs zX?=p{DtGE@Xwud>JlO0-4t#K!w%_{Gg@s8L(j?Ph zE|3%LYA%}ftSq1{29Io``;<|VmG)MqwKaDqt`FWn_oIhqdu7e$Hxg^R5sc@m90D#1 zY6n&RL#4qWqJUvn5&ro)ZShm$2l zSm2=Lfdx+h1AIO>Q7Qt_YbN%I$)+()dirG46bOQ=RayLfA&_vwGStl}+^8R=*FE)8 zwHJMNe!?fvdi1RimQ2vm(V1SP_?ko-x#Pwct-SH@$Yeu`o6R4sb`SxKT8M!_X%c?4 zNEt}})QE#&C1WzJV4eTK*^MwHZJ-X|2* z43=qrRkdTY4$S|781bfti$pjU&P0SeAcNiPbw<;+;*I_77Mh3&*Mp=AvJEYZ9V1`Z zmbVMVnUT@DLzcy7QJ@fjKvc`hE$1-E%ysljo}uMPXBD`RmiL{O29a!K1oIK?=~pcY z1vR`4tPdJW8bXw^xh*ru2)-mG1`U0^+ZwQ!%A29@u`39_rIxh|G~@YW(M{6{TsF>| z*v8&P_=*K89Tq7~_|AMO64e@54jt2e|4LFZi`n;G+v83rZaxcPz?*vq=-+-fo1|W{ zqf?lHDbgcB*&?GTz4}fg;5UCBYoAgt_)B%86V9tVBcGA!myCZ(xi6v0#G`73uL}@}3J!6H-!DJQQf2|I>qO zVAs3qStA$$ecIvgJ7K}3n~_${HmTwJ!AZP)P6nz?I)=2!e-@L2n;M*L&lizpZu~Wo zsyR~sW)#N7C9dL&{S%LKMjm}gg%im^t~(Wn}dGUt#+05^=@TAq?Mp@ zfmObWG!H&}^vyx+UZBF(AJo<5|D1qSF84ppF?62LeuqT?%%5X6>kw4~+u;{m-lwF| zU~SdhF`44? z)HLekO^}QwX#6VVewjHnZJlCnJ%muT>xrRz*$MBmygX+&mjaR_cYRtyfu;MP;Ox*wqBl`j4WDX6Z{7bb@}|Gr)qbtI7v#5-nfRV z#yR^%1XhG@m)LVA*YA}M$N3uE{Z1;!ni|1L6yDOpXg>b}>ObtFLImrT0nAUAh1Eo} zayxtYYr@-tUJnXB{-kRJ1K<0sW;INRvpf(`vDph$<#TeY9*T&te@qDt@ zFf?#M0q`Ij0a0;dgq`09H=3WFMIB#?ig66; zdLV8qXlTF4 zY_SDz>7jfSS%QtjQ>@awYpkX84(1`oTV~&zMm=ar+`OtISSmppy&&s6Dg-j2@zPRX z9%a|X@)A)vX;5K3xNu=)qA+RIfx(L~eJH%>zl|)(KIO=LS{?7TPqsLjMMnr^PVBR; z_k~{gS;moa%^p+TF;%%A6U`8+ty^e?T~*!L@v1rubCg}$8dG95L9qTXvs;B4w}zfo z(&^9I=IjEBy{NhgO1}49-rx{IvodY?X`2}y?zoOF>o0q1$0^?7L*(3{t6OKydZ)AV zfutphQW|mVOLj89W$~bIx5Cf)B&CHJ@r;c!gzfuQM9t%IyAR#>N*K1hzY#6u?}~+% z*tiyLsG*{NjyUDx@^y6HEjCc~NNw1`kmb4RhS@9nWkXU&%MK^q)~{+}Y#FB(ZVl>s>E#asazOz9RTCStIKN$)6{YyZ^f0~1CENy%%E$!OcK%a7i)|7V@ zC1Pk7`&Yevxl0~JRV=lo#!RufyIjtA9j38Ys5^n)^UjpXJIfkX4%G*0pyE(T5^ZRO z{sR4EP%y_lKDLPl#E`# zwN&AnJiJKhn+OShfvJJ&48Y5igFFIpmJR-Kb9;ecWj{Ng-4<+2Yy({QWNz8}5&sNj zUYDl59r&OhSG$}Y7E(uF%6hW$LzR22eI1w=$_x96E&kKFZ_u{ol%q`~__!o+|G{6hN&E925L;LSOIU1)N)tRw7B)j!YhkG+gIXBCNAxoC091aVC 
zhuzc@eq+o3^BR(~k-G0Y8dWxXv1F759NZs@3vz!f+K_ptD8jwWZq^wzUc8o=6OO)@ zQd@Q?zq-ktSX8+gS?rGk55md?^x#e^=bh8qIe|Hd-?LEnho~zvz+h(fV_bq}A1_tt ziEOalj??Ijm_&BUx)j$Z^s&`=o-EcSeRcaYY3KSrEGfy&v(RFmQucQodoCliu;BoF z2g!a(5AxPw*2=v=%tqC@e07a05$OmahZWA7jvfxY0qLfj(1>}kgI?FKdne<$M=EYv z9-#DShbAI=W6{CaszMY+U8`(AX5S&4Q`pg8JSTvs_n_tkp z$u79~HFsh51InwP#!|r|hIQ7p`b^m3Q9Zw~{v?zu@9}DQdHEy_z z4~E@J*cZggDs%$G5XlQ>PBqxme<%0W?l0vorOdg|bW5 z0&BZP!P=2qR+zs(ey|>r?*`TKtj2oXJE-p%a`X*G1g9@2wG9-Jd^B9*WV(-G zZ5c&;S%TG;FL2I=mLZ1&_q2pBppic1!Fj%;w`7enXR1?PVf%NFz7*X*Q%9^O4yg$q zpPA0jX;)N49&Tq1K;n9Ptheyd>!m*Jw>6?xT$I0Im(y_*%an1OJZqV`oxi55ItM;F zC0Ci+4o~5lYo~@5mdoHsB{MmM<3=C`a5gCTEGO_n+u}YkH@^tncUW36`n|$c>7j1o zfvjuEX6>C~3*<=M^Vx`yxS5$i`4#Fm+dzRw$0~6wN$A&?Ru;|9c1XXrMSygTjH=kJ zdFW*WoTQJhQnG|?9Kx6oVQYqU&mi+k$>c7&xLm$meUYZ-@!%}LaGT2!v^8KIl01#z z#K^S65b&M6k*q*e+pOdUt;5_1-y~FF~3eRLG;M+EDR)=MB1*bXS#i zv7}E3p}5FiOv)bXLD?h)z}@;_Me1wpi{yDP0hN~b3t8!jV@&sKbaLUt+}VN})3zR;x})N{~G26iYbc)Qnpqo8w(%%BTF8S_XM$p*)_qs@@EX0DirkbGoArc>Cj;$5r#CyxM+rz?o=f|KZaNi#3 zS;u0c;VA3|4#88{_AzYOs5!s+zqqac7p$Y})jRUCGV!SMbxVm7ryM(GcL^4(`(Z6U zu{}!cbGgNvh;Uubr({9aNT8`|=5W0+)vL3fRW>i5-Qv|YlN<43;*m|}_Bf7bQH zYxqf*#>5AEpA3{U<`&gb9U1hNzQ}7V7sc|pE6n8%xxIa7&;S%d3Q-kQXWUYR@03Sv z?Wl4s{%o-Sk~maC_A5@bp2p_p`k{Sr{o?M4vhnq^{x%28gWs-Od~zc;M8#f@cavzn z*hPT;81nsjzaSDD9BlulUP_$|_UbDU)!*?AJ&Hb%F&_Y3;pu${!>b7nb(VzWB{&O| zG)V(}(y@chK5w4(B=Z&~%YG}pa;rM1FEg{Y{vmO7&?g?U&M z?t9`k6Qo4FV{(!zIiZ6pirM-n$b6}i5k^%msP(?_q2X?0vG54|Sv@J%$_Ks{9etHy z?|d@9fDdS|fDz_Xu^Y9ckqBmR@c{@MT$pIL)%$FHDZM_Dh%y{i92PBEneh8c&YBg) z7Qq8phYMclfk!=?A6^!@5Sdts$Vb`xU=O_clo{}g`>oy%Y7oihDip>#x$w)xpq`(@ zA2spSf2J#^4vv(JVT%%{A@I3rh>dokQAuKe#|S{#qpusRi~(k1sYWM1O=gDPM4;vy z+7O&4u$d`2pt{_XY#E7@WxErLDVT(dv(#pDQv(}!%F(GY)!9@bVo~jWW3%XtE9LOw zNjD;~li1G&$Gr3i?8N&m`+uj*`Vn$G7`=(V_9ayB>Sgha?zLsG&L7P$NBm^o*hDz@ zEyYfViBx|J8*h-U(J}({mkg?3ezoO>vYrt`Jp`N65A7zNu zI_>jz0@{H0kiJv~#0sg-KsY9Yfthu$KuQgl__KB|PQ)6^>|6nuTH=;L#Go<6k+deH zpI>0g#BT(-36OmckS)J*vH7yoOqD(5wNLF_fP9*#m|saEHklI@Wsj{EV>i6qoAM22 
zvUl9l8nD?!(Y>+Zkwd4E3U(mU4GmujURX=LTmxWmA%sI42#K@*GYo+dLFB7V5y!BnFXy?PXM*d zO7N|g2SD-2$o&XJnS%|{xb3;`&(_encr)r2>DS|yS#I-_>h+W&l^#JWVgJ|*^?g<> z3!Z8;hG_WHEZ4lAb70rqUY2CJOmX)lFmJSHXPvN1PB~->MKvsYt0j%;EFZx_=^+D? zkP3u>k=6Cg4P8agX+P8skhiVAS;r^SOkLXh%)ogpdlGMd@MGZBCq4n=0ashQu-3yz z8K0ThfiuEBM@{gfHe9WnppzT#lRrdXO~$x5e)ZbV2R3}=Ga*28^7sUeX)kvM1_Nx) z02SzXHhRecC5)YMPp)TTW0zQkt#^!RYiBV<&|w3SuZ-YbGS~b|4gZ+&Y_os(E6Ar2 z;3E}Kx?^E6uE6b1VO6dbrSX?KUi`E!ioD{Mp!_e_oQZG=(H**s34HgBbgR5#^Z|{E zb(5*44CRG*P>iOG%_k$a5dsa|DLaNUsV9&|#H_J2sjHyDoRH)OZ}JH&5jOPIRg*m2 zSnXmT_{Zt1r|o!ozw_vIE4@wehb6R1TMYoGc-N#gt*n<1XCE7IcOgdo!Ar}#>+2SQ z3Ov9G75{s0g@Esl>_pl;Gl4VNfvvLs&*+FB63yEWwVw?DVF8BG8pgf>Y~uDd%>L?2 zt_yzbeAQ(|>1p&sp8z?{DZ}oMnPHr3bvHN93O7$cA!_;Ti={*)D z0B$N46r6-Dj@^&ffwRKEyz=zJ_uNmbmT{Ij;op}jzCpcZBy&=3=4Z2DVE%^6r!3o? zH={`ip*mhDLW9RqIZoEOfghwQn}_a4{yuU@^>48%5QUO5mPdA@MX}YMRIxJ$j_2`} z8%?9&ZsZ)awNFvD>BeRKUlBqhQ{Sd1?wIP;p-}yu?Pw0d53IbuXtEvrp zSpK4=lJMnK0u~PXf7WtV7Y!_iUww`aX|yDLO)CA*1X`eg3mk4&Cb= zCP%ps9*6ZutYzGLdo+4sQu!@s_B~<-Q9HcCf_9JGJR*j*;Nd}84v106N^ZQ!#w>#F z-WVZxD5bgQ{$~?=;{1e@qUJ(X;-WjDNMFkNSZJnG%LLxU8=H!?e%jW=}%AlrpBzC ztzbgbb`lA!vglI4Od`DX#a2)KCp5a3Gt;uZAjv`+R4|Z!#_gDG4Z!I5-6Hce<|IoU zs%aX_@JteAt$D5u2olijg}&tBk&Jo_n9MEs@-;7%XEMxs{;Xs1b@fF*ikH<)Jm80Y zb|X!iZd`SnETA0(YV%0OI(c+>{EQy_)@2W&cu;^DV}I6RtaZ#5?(q^C4$8N~sYXR; z7Rm>p_5>KI6Sh$}a?}=Q7bC7_gDv|OB>qF%lx}UAE)YVUZXvE;3yT`rnKv(POY2j^ zs6t%t<-+`Lkr$t3+?x~<4j)~%k>!?o{+MUNWN;cz4dn8n~LD>sN=H}UD^ zHm;q_G+wi6HA}KuGYTeKx+@p6uhE#RphYh~-nN*gWHjD-pdlkrR8&0;%2Le=u<U zWJWL29<~D_sLsm?P?d9S{%{RqJ^8L)onIlSbdQh4CWGc*k(!rWYLH zCI47;BCsZ1stDXNWz+|o)YXB*pQ_{>m#66g0|#m$_bTCypow;eglgMmW%I+K;M};5 zoXjY6plpm-LU=xhs$52_b5MXXF=9V|>PfZCy?;K;m z<~_;uNfWdhCu++#ENiP*Clr8)DE!G-21n>Ft_s;tMnrQGB>X$(6Z`0z-Z(YNV0>c( zZmQI0Rf)CNu{_#P*OC1_nbuNOZ{c^xQ}}_rfZ0R^tQ|a}mStnv;d-xYPEekjr-ln) z!e!s9>7T5c8Ry}l(S6Ec{+e2b zBtd&w%t4uv>8BB-GEX}j&y=g|9^!TYE)h$q4+dae5(2}Bbse*Hoy!OxY}vf>uFHx| zSd&UMZN4<=R=Esm5mkzQa%H~IKJVvhI5)KH_>u08ph;AAXI99!&Gh+7U%mjRr~%{s 
z;(HLpRlti$-)eUrFpxP5ga&}C(|CFOrx8C#>T>F`Emo3eD-Y(&{QRmcGlYC7z0Q`! zGiPWSD0xd#teT}ko{3qVsnMda0CoxQ188Ba+1h#*7e6(Y-SaE=-o)%x z70nH3X@NFIjSr}b#P&*1fN>vz(4@H-|ogz4o9TSX-3{Qg5#2>90lg@P>8;XJ$(3u3ING5F|R#) z-%eB+quCRoR#cHn`NZ?xfs22L*P)WB@|@x*=ObUZ_>s|WA#)5BMJs{${N|$Qqh8ms z^^E^~$aw#_bGT8IJRQt;3!an$sk0`Ll5X^roGr){BKX$z8e{7;BX10GHz{d8FKJlY zOdb|32;af=RPn(f6;fFadL~cV2VzO34ov#4Z9{eIwTZHd%T|xnVguT@v_(9vVr@~= zrIG2`g%7aHonlFyS0@v?XYBz?gK~&~UHuGCVnd9liGgaa3fM*VdKwGFV^s68Lfg;K z;zhoPt&4%8e52%Wcc_xl0STK0!#zU}+fEn03^rU&q4+8Eg*m z5o)+V7kA$uWv4T=L&@~V6o%=Z7#fmk?A%X#Y*d}FJY+q9EyEIYb|o*L%~hPgI_xvT z58tyM@#_Koiv8BC(kw25Y%0e_-utWL zd7rTRSC&OY4NX7!ut1Fa^R9#g?^%Rdn4+X>uga=s1oRl2EoC&y^vejZcXJGqJbC`; z5b}MLw^=8AJDlG@R;}tqo>857UmF?~%6%x=7WH@P(kaK%3L{RDf#Px-Sm3m}oXW3& zH;Xg_L(d3AAN<0X(C>QUKImz(l+&!SXB-Jn3Li<`N-oP{f-}1@%KEn&tF`otBqQWi za_T{=J|PPq_%&E*x^N86m){LUnI5 zk{C4k^z`gIz^Qa3SzrKE!Z`Z@xgRSK*@X>V0U5^|ou?eozjmx&`lBt%t-N^1`7hbB zDSG-#0U3_I(rXxUtg>y^W|fcVi5qeh!E{LRRC;PntQYbF8ilGW%FmYx&@^s2Csi74 zkc^R!b23T;7Hp1eXFYnR*5Qi_r0WjrSDLQ-H|-R|U4ona;N5BSMjp9fYP-kh+L2O^ zovqQp29IGXf#n{pzSlFJhh71K&M`)!Eth{fWrO7#UBXY2R0ZnpU>x0&gcXkK5CYk_ zaO_+x?&$pb{oGWpM$_%9LL^gjeEb`==s&62>P%wC&$J2wYFX({u6LP&#wX9lMLh{j z#oDG(Q3yFh;re%g>jRH|IB{0EuFOGz;V8UFb95u~1Z>Udz}FCeh(uuCJ4u~9`+V=Q z>ytgBZ$(HAsY=+Jh~IH#ANw3X(-pUn!ga3FyM}(o3%L$p-$y2AM2#{ta(n$;k%rIp zZezL9H1X7JrKJ54#ZQUAq>DXCu999`ef|1_a*w4*xOM&(&8PjHg=!L6YwC?B*$mfY zuBHGW)IK|S;nvk`gu%1&QoK&6Wl3iR0($1wLWFNd4Sl$Sb+6wydY-K6kzkFr(AgVC zJA5&)PQW450KkAD<9IezQBhl8#fI!`t?fbzaIcS19_$UMqKl0cC0JF$<$gcTc!pj6 zR0JG8qp>F_X{X6dg8^~~s|MXO6Z45yEim2Lw5a0Vy{gf@?@era=cRqF$-5Wcx&ME4eMFA0-T#{Pg$Fpn-4RagINz$He-5 z-9K}GCCtd zFu{O;bfu<1kU&5P#X_%ALJT1wy#}O&4sK@O_w3p4ySwM?Is3mE`0}p6AYU zf9iEzpWDu)H5Oh(YiDKPm+#RTQH~zF=7RFWxiX4($OZ6~8m&d-vy%Z!np~d09+Vl4 zjmHtI7ZZkZbRKzy32{r%gO4{*PLWHT+3xDIzGkwD_a-9(X*Nyiqx&TUWhtJD}S77>HU=y;W&1gGYdCST7K&Qx{&RDAh7jUdg}Gr&_fn_p?bfT zPZUK+tbQ}ePd2kzOPq0H-tj8xTHBq0ee%VZqSB^}X)cdR89gYQnmtE$V96|j2EAXWs+C9HHjd@ 
zWKC>KU0Cr!a?Dg@Na_r|-sM>YTwZPcBlBRm%flT@>a1Gq4OM}Sg-QUUH=*}ZGC5^e zjV@}HMA8&0>^q62%s_kw31U z>67P62!+OxZ!o0-3|COopn%dL=dxK<6{bx^PgCv}g6>Prvh~JKxV>nY0(rzw_3J2J zc$=7YIrX%>6v8@+FEWkPF^Gh2aT4elu3cb_z7gyLvAW3B$9(PdqdiD zg?n9H-?joE*}pCvX~!>}x&*X&bA?gR!@AqT&~U$2-`>2?k6Um|*yA5l5p($()5A6# z>Q1^f)O+2B|HcQH$=vPnjDfrF&HFx;I*rtX1WNK{6KR?u&c)A{S(9U!u4YRQ_z__3FMe=V%k8oMB^XX3YOQ4E z#q<8&-*a(tN%Uh?z^(G8`vcBpE9EG>TY3*o9>QA4(iYIB!TlQ9pa27C;3_3PaXrFf z>{}^n$@iUWU9+TVL^HOtema2* z=uEw}&b}Z{LRxu&y>NkD5(87^uqI+{nyrQtok@#0@Djp&eZv09qam+9y4E)Z&UO)w zIhXtW89bw)xcK>cmpm5R+Dgpc1}w+N=Rl#+aV8HtfwIQ8(rHua^)D{8-*91dRt;K! zz{2_Y>&EeAA!8zx4)Xr}R}EPsOL6lrF5WlE13MH{PFIZ~9E!}(!5m~^5-kAZm5K^j zh%R`~0y;n7{A8QKNI|H>lslCvIaMS9N2?QkoT&4*mo}D}`M@EQ!F4h7yww;wD;=);-$;Q|Mw@mewti7@rX1Qv5 z{tT-u>b5JS%I5s_CjenHByxC4toTsM8~f#cvWkr`o{yg;3!rcnL#!Pd4~jFt=5|?1 z-l|_8sjEx`PhO!&n}Z#>x&kG*%wj=m=)H6IjXAS{GR&q_9A=zL*}JpVgpg4V_^8tG zOC|l2?qb9|iL%Ld_ImIDB*GNZ*mT^k72t~nVz&Fu*H~i2rL=aoR6X6N?RfB;Myc)o zen;tQ+ZkJ-)Jy2Ab!~Mq2x=94f1Z##m{@`c9%KKtg?jwp!--Z7ub_#=JuA?Y8BSph zORir#^LbJhGGVbfG|8s6hYR@4wG;9zzf!G`>==_`niHNtmcRbtt0UefIb{DDCfTqy zVNJKpCH|3$sEN*ca&#gC$?@MydZt%w#9&$&Au$5md=O%e$Y}0*Ew0&R=$_Vu=_f?1 zwi#u%-|N{89|sc!w!2TI^`Ob|^1Xl%E?D^eZQHNeu*$YS$k8Ulj+m$EgdGMfJol7* z!E?rr-=z`o_99Ny=O@Z+)jjY))<=9$D423|P2K`=l>}!Sz_Nh6jaa#gtDRq(jKcuVM zJfh7XnDqf7HjGPyGBGpo0$UW3%*E-u-`zHP1oqQlitk3Y=vOLoX-U@=sCRA=dzC*b zE2fc6y;uth!<{c$*{MV81=_(x)5n^`VLAzyy- zoHgMBTB1f@d{n#~dax?`ew07LA$1Is9Lxg$2s9t25|Eat5(jO^Tf|n8FP^{`Qv-Er zteA!`f7oOFhmblq^$bCS-$-sk0dTdc837NAI`Y})m%9W&3T2sBMJC%Q*7o>4fs1Uk zh0KW&r%yCp-bRkaJ?x!!smOFXGJpH5R<$9!ht(j;Me_)_r35b`^kk$ZrzbVjI*9f; zN~bJ}mU71(;s^AMT0A@u45J>&T`ugKfR0&VMf+_*ovK+DQ&2{EtA(EFvR|g@Vq77M z>967>r-SYu_vc6j$KzOa%VkD>yVOmoMq)_h;|1v=VLz0dE59<Gd;SjUOd#ewi2acv?&(?aNUQ%#5B+)e7(r;1 z9ZU)KF;c~fIr-pbS#F?hGvS6Fh}H3F=$b^x`z@Cznom7AHp>_ILIRTXp!R2Tcp>Mo zZct~Np36yyH=9-h1~!qsTe70zZ!%m=CcY3JAN$Plo2@k4!M+F9e9Om>b0^+B7`i)y zhT3k`&=?)v)8k_o_w4e<7i7sJ;^(pk!3tm-J()li+Ib^CiQ6$y!bEs!l&lD%M}s 
z5Z`gtT>VnM@oZ3>otl2IEPGemC-&Q(+tCKMHx_qYzvpvazdR~9v{-G zu$9*H>nN(7kalGzLc0xVB1Ufu6}8ir|FjbHbS>*sj@5+>C))K!MrlXVXub&_O(7wn z%gnwx6c(8VD1V_XaQRv7s;7(4l{jl%R98vD>#tIT5wkLKUC#BKtUXeY2ev#Rgc%$m zA+bs(AZ7TkjGCzU6?|LD9QWH}Cx5>=WV!^JyOunmqtn&SSeQ*Hja7c8G^j~vdi#0= zRua~7%4e3~t~T^T8WL6}9Vv`zEad18N=L{rB?yLDgRG}xmw*4B_iHCG%PC-M zDbqUnnL-3r7E>!qV?-aN7@}na+xmIY(sro5mB8V8oCrUOs4G}J+;i7jss?9 zFw!L$5AObfX~6*HHSdHJYlN3{ThrJGLUJY<#pSZ}2`}#yxnX-EVc&2#RNmVqS#OtP zcK!|Sm+RQ?^o&BOH0ygOItOP|T4wqw`X|6v^0X_Y83+iG9gjmpoc0tvzMk(I`#mUX z{b?OFzUh3koR@En7K+erlbKKw3!uAt+V;M8qSh{$ESSs_xP$ljfh zl<5;mmz5XFG>YiV5s0mCi8N#SBt0cA+sJ<24@L@lF9-@r7T9LNE@`BK^NZWFuE=^- zO4SGtP+D|&helHQ z3)7=tZaZT(EQ&=<+!`&SgRKZ9A7(Zwd<9c1;Vp^4b*h&yk!@fVHaCmga(x2*YbI4a z%IfrHXE&K(?Nmm2tU_)j)Hh5&&@E4x&kFKKvFHV_UyVpJT%LccioMfre-ocTUZG}Yk~}1iO!Y3yKkiD0`^N?kF6A^%JuEM* z8-4)&YRd4LU43w>VL2#5Q_x7COKiPq`Blt>>iv_)TbBqfePyKUsR{BSx!+RiNi8!f zn1XHv0UcR~St|}yWjTe*I=-VfDjdo{y*(!~*CF8 zO>6xaACk+69HNC2dcC~DwZ;rtomMwtFsp7FVo5lUcq2i0djW4;b{V#4YtYEHb#9XDkYxhz1T@69XHHpe@{H$N%fz$ETSg%b_p|s1xqXcUgBQ~*?MqG z1fAxP9uAg@Zvf;cdvV1pgoa%;)foclMO+5m)9AvegZGvjk}%Y1hSB)bEC7opJIZIs z#@f$Oy3=k5&xM)}&CLdKk*5;At3A0AYbo6u=j~^9;>`Y>rYmTuR6^_nY)~&A4EGOd zczuzK;>t$hzvFNO6!(F{6=kQ1mDC!_zg~OtsslCj<$l_Tur~6uEtrOM_jO*=u*BFD zyrH>76LLSxW>;tkc2l`v*&ex{JoC(-JS?>&*Svj%4fmOE)7r2ImD^td|K#bf`N>n~ zbFD>xYq*U2j1fCkdOMb>xc|4#6hflbeH6Or^m)NriVTX%#HdBmMzQy*eBqJp@k8?g zn6^&Bo1NVH{x)mLhgla&qp$_3!rSV_>8Vg%LUu266EAlA!%NFrIJVt;p# zrC-5Cm(1eNn;aRsbzNqgxm|4)G$?g*=<%=7WXdqhALl?4j(=cV9EI6kSndk_aWQ_T zt(kx3e+_Ug3sDEUyk#&&CYSN;c}GRZL;_+oMi-$o1Rn|2- z>bS~#9;*C&uSdEa@ACdD*46kYj|)S)1z8uDKXHFlyRIlQ z<*03FbvrBc;MU7o*{g-|`2T>6wPHIif0%rsPV*hSRjGQ(EPDU?htUP?C2E#zkgj>M zYKAt2F-o!+L@v4_7}70S){bRgw74w;c6#hVeyg>!X&U(3#BOBgPo7F8Za{ez|6#vr zOvPDyqdr}{JPkDmuVu>LSafsBx!nJCpcZ5G6&E+R{Ibf%?Z9*{MC65Rs)KJ(e_#8G z=Qft6f?-7C>y*I;A?Tkxx{6#(K*U?_FGZ_J?oMwmD)F6635lOSc{*d(W@j2tX2}tk zeDWbMu%i#{QhpATvr=f~MZv(arziypAufSh*FB!z%lJTA76Pd4;NW6`sguw`~?G%lg(nQ-P$ 
zo@lCvP~AcDp_8It=K@&T$;p8D4;5EFd*%>8kUWzm6BZ&~8M*s6OMjzgb;t2Z;c|`V z_QvVD*tvl5A0MWSzjTwFf0sVxb&?Bxkjj2AmEd6WYJ}VzqU+QrL5Z&fC@68uq$XLy zGDp90w)a{#R_{5%xGnbD`<6QZytMm|&c5IdVO&{Yls69L?>d96c=9Eh~S~d%<@^GQs#Yd75v~?R9?( zSzd8-jpF6hls}EIrN&O2=Fi_=)=%)`!pa|g=)VWGyZdcrbE!_h!{6eWsn1|+G+`Ia zpu43;<0IM*ik~P$QC}h3X{F~=tK+Q&1c(rJAubRQYiHZCGj+S-NG^U?Lnja&0o--@ z)Kpd7w_%;5&(1k`^-z4oj+?^IouD}4!HD$bI`oj|tDY1?zR z?F`$o^CVcEX6%Ja`A9V1Xvd&`;ndK;A={|A?{ppUbE}D!S0CPoP*x)l!Bkc|AG84P zC(@|3eBD?+YbeGl02GMSifs-(ptFzK?P5NhbE*DSre(hB!j)!apxU52WxJg)?`rA1 zR*I4>+>ijQ3`em2kM{e|6~wRZmNO4ukq;kV+9$XZC>p;!AmwfacV)OO@UQrQVyMK4 zMG_8UlG(EtSzPD~+8KFL+}K#X-~AF=LT_XuM4oH7Lpg~Fz^vKSiW zw@IK9Sgz3e9x9CZHo2HUJ5DkzLW#Z!=Hxqw`AU)`#R*9p$ zY4#bC7!DXnsFMk^vfvVM5RFnnSAOygG|TjfhMaX05At6_ zQfCjvmbQ-8nT!nvCv*PDvnP~Jh%nQQEhFJ8%qAF@U<*{W(%5~6? z&Me_9*asB($>|Z5V$PLl1bi*qX7`>~4Wx1usw+F2>ABAiqI;H8uAp)*r)Z>~iucVO zF(=Gecn>N7;2CTLn=m)`Gw=1mneJYQ>mK;f1JUYOqr#tUE^>w z5|$$~QM0uVQw6M}Q8+^0JIGWxLuoi~w#fak>E6NB^Xe&9pVD9P1pfYifV1V{`PJXM zPmoKb|1y&~k+g6=diKoOGE*q+%PavvT8Fr*i1k>X9nB?3_@d(B%(8*H?cmY|C)rh^ z{Gt>Am!2xpCqGIGs)kF*EOIqsB`&{x&GSowNx@;^DA=2wc`h*NMvaFc!I&wasBU7s zSoAdEJ31o?<6&SKkBP{a@Qr`|1-ySh?EMf`t@=iwyq=`l{Wc@*iaH^1kEDP)#H1+M zMDkFjmbivhxXOMu0~Y&SE&rE)89tJD^%g{`&pA{fe#PHob{sig?!pgWx*%Bp2VxfZ zM?8G^ZTMEdFVPRFsGo`RiL7Oc6c`2laaOdkU^ELrK#u!u0e@3Gmbws!_lf|%8SvWk zi{8-PdrH2QTi)>qH*G|*pPtT0a$P~;X$03l&&}BaE5VdiK%zP=JFBk-cVnui4Ll9m zq=GB@g6uku`+6A}bGW(A&gQ+dg+#(zLR?*3v3*2lxQK$P0mp4B5*AWRvcz#S1XPao z{aqgEm7I{0CvJb*<&$lkeXj=J2VT9*;B8nRQo&_OD7-h}&)e&RX^(?NhcW%mH;~mm zdce@G8*5BOM}!#kq_$}l8VBE@tk?EsIMjgP#%Kr>1d_ZV>njB40**~3jok>?LM6$T zR}Zz=nJG)UuzEzW%c1gEd#Wa12n6B+7tyxAT}to0?w7Wt_ueZvwxFtaG4u8T9c0Eb zo?R!_l*9o;Ex8LN{95qO(Sau~ntkgGGx=rmSN~EUODm*==l^xoV)fhp%3LEC8PM0W zmg#=xy@uk2D>~dD6k~BZf5>w6WZF0vQ(CTepR`F1=o626zs(9yFV|aW0;zo!c<5gm z@Ul^=LQMI{@Cn;!-CWaq_U%SM#=O+Q@p{&jHvJ=+GZ*R{Js5+EOZ}zP!`_Er|FnSN zq#jV50rh*fVTx)U8q`#e6l&GHGHqkbt-HJR+f!vNYv!A<*AoZe?=OFcFZh! 
zXElxc)o!ismmM}v1;qdTr@6tdQr3p()R#mB@b8T?f=-W0{raSn<@{BLm^i|)C0jre zaVg9htAd;JZSOBzY&a+K>*TL6wXO#w;ikW48vVLRBm&Z@tzFJ@Rj}w_Bj$`P>r#+* zxuMKh&nx<$kaJx&z*uYK7v&Tkn&^u@7~>%(gX3&+LVc~RPNn{p%FBg1&dm6@;)j#b zX+9xth`t{VHCf8Tc4-m9jgxXzya0i^T~4W6P(?XuRUqiCM68O!vKXUiUsIL z+NpdllPX*QoT1^A${TjV=;>`KcFbha7N@{3-D)bsp=-vk3bKuB8eqVoS={iMIVsUt zw&aON6q_tK6J~+*{lDx>Uo_f~g+l-wcWV_sY)!sD8Xi!+RQKk#lpEjoTKC(6?>tAG zQU+!tB2!@B0wAUhJszUjNd_*0wK~^mTa@-P7r2~~Zv*MTW~9v8f$06%p?R$`6+!gl zR}UB3vo!X?=>Fun$EPQ8=~=Ieo>}n(-Cq$o7tgS}R^qtS`k_7 z*Na~cMs3S1ku$@NK~69)b*?K2$y9sZP91RYo9%&$dGI)id;$(kQ1j{_>qeB}Jk=T5+8Qf~3A0RA(9)4`m3K8EN`Vpk2tCAx41wfJSC! z*e?e6$AwR{(l6HE?%z#0)>VLizfosEW_@2Ls5eAG;|WQLNETEw(wgfm1tPM*YHYX3 zHpNN{?;5Lw$<>T{QYm?)(DZMs0C|FRYMBdK7?s-sTD*7D@>{N1s>j%BNYm(IIgFVD zZJltCFUAuI83R3P7$;g^WGb|?aSv~J>|L!|UgD8Nl{O#lQ!w@ZL6;jL*E9nkHF%F} zCr8jhwR6u`E&BQMwcRZKH03#FI6HLh<_?;stDR)B(F5AZmA?coxS`Yk$N7$vEc%4& zb8-Y#b`H6<0vAEHPI#A>)QunrH;6vczC#?f^*YT*SR&Q=pW5m)cgci;Tl@vq;BPqixgfNq>=Q?csS=xLD8Y=EMGGLs5yp*RzAiRr^>gkn@NU<8ohvnV&la7~jXii?a;DLsKm&_4jdAJVPaTU?7>vgN&; znod0FbeY%ip8CCjsLG1}X0e?~p{)kL+r{Te4_F}dy=yMpGraOds2)Ra*qLRCV$k!N z=KoW>U9I`bX-y}Np6bSlF$=D=`Q!1sJSt+J5bvCXJ}3tvs=4t}AV7&_eZS@yAK!OB zU7yKV$DJ2(bgNR$wn9Eej^sSJ=XG$-Kzn?W3n#2#c05&g_(+V`mWLn?*XpE3I|^(H zfWc6(G2>mP5K#(cNMZ*$=5YUA5QyuO%yM+t_b1zqRf610sZ+bYkGZ~Rm$!C(2>8#H zz4|v_q!~>x;E|DtN;d%!S%py$#EIglh)TtGt%_VhWI(2QAl65Fwj8D9Rl6bKR|ilx z0D%@!ps&XbjGop3HC|$*fJrC)v*3CK;p3ui6WA(e@3$q4{usr`_J{5{`VQqaf$Wme z@Fz}1Z=!3CedNG;LPT`Tcm-tNuq(ICVe+Y(GbKS>ojaDM#V;rR@ZskFa_J&XJ6wPd zgKESuw}OO?Prk7GL15_&m051I>OYB6#uscT*5%a)KTtPR4nKD;!gmW8p{c?2U}*^} zod32fq(C)CA|9xD$t2wzIQTq~)M>BnoHu&hgPCFN$W`}3@SM0WlJ&=j270fk=%Sxmk2|JcDrb2ZzLQLN zW`txt&_JSLF(kC@3_}B|*3K_9ZIQT*C=sA1LaGrp8BaVefe^N22VmV^i))79?v(zS-gizTDdj|Gn_- zK;r;LA(%8SkgG73C07G(#eT9%eh}_fV6YoPlnrfIGRDOxR5v>wf0M8-(|B)Wp(T2e zrN~>Cb?SYOB($uw#%?C6|G>{Qp~#aPZ=gVmMl@XzZ8Xlj9IPr|3<^#OHd!ix75a_G z7&BJfxSz>vHasu{9OwEa>iNMC%*<6mq0csosje_W7if|ZUfE~jU;=1UP&Rg=kt`d} 
za8D9PNbI*%NSS+y(XUF3$A~NLS;S55KIa`tCda+o7+vsjE3x_9g20N2u7)fU2Wf{~ zF4`Mgx3E>j3xO?dH5bOtHbD5!o0VDXdZ$Ky)R)f&rCe~u+;2KZRP?PB_MKyXr34Jt>H`~~wVg&9cn zQ}7W`(3F-W5})jXs;DLvf6;5F7aNW*$S%ww5nP{vF}$}JoGn%H{%Fz<_Y}5 z`AD^a3b2Reo>7OqzIg!rz}>ONS;>ajqW;G{n_0&ji*|PBRtzEvyCZbVA%OtRaI6pk zpj?&zi-hDSzgn)5Mf~Ks7=nn=Co;D4<`tpxFp~?JIDBL+6x9+6)KLiBBjCw|cesyy zmo*HdTa7LW=atA6*7g>J_gyvw8_oXYQM-R{@ne~7N(1U-$0AW=mld*9TlA4n;QMK@ zcdWNM=HAk|=iIJ#`IM^*+epfl!S+0eaXI+!-^MV$rEpmTS7Wr5>vhpP|J)Hah>OZYro*kqxdO7iRYABPGIUPg*xkV>R*;NGNq!e0AZ4O z$15ah{3WVb$3_}-5pgh;)F>BJy4QnC%}kixTMqF7C|(X$yI;gBnrtwiy;*`ScPC9p zj$qPr+A&V4zx_DPJkg~;Y!Ob2`CWN4LyzR)98>#sP+F9KmlvokrPI0MY#;WGwm9QN z6Npij9m4z*NOYf6yg8%`+Nf6j!IFVwJ$zuyguuUp2z*^LY2H+Oz{3>rCXF%Q`+h)%bf z$q?z9@#jXL1a9;Ru>QB`Q&O<<$*;uGJ1$wT;Ef1}$V{<;w4Bk32Llw?=NnfNM095$ zmL3evREx0FSDM3=Ux+8qMrsL7_TesC)$hhcFrBHCDsK)tSnvyNuCLtv{*U_}j6$@D)PtVz87|3$XNCix zuNY}{c|VuCF~%jU{cQk^!m~%XoDORNzRO>~|AzW;ayI3qo}|Zs+Sd%AP}J4xO^P(J z=d$<~Ak1hyRCoJODJJ0M;P22sdGe<)?m|&k*C{i$hc3x11==ge8(u3TM~nGF1yix| z{r2qaA*XCE(-#6!7pz*-Cq>=+f+!glof`OUxfi=BH`}RYx!`VKRK8bly%ErvyA*pq zuJri3*iV(CeHORu&A%M$API`}@YlqVQ6A{|++h++l*S}FqG*;V)ZeugA$4m`-V~63 zdBLZ0aBH(0k+{5G9WE3QJpC+eYJ5|LfAacmem-vKJwwMR16Skc)U&{ar`i5CZOB})#?Dfhv$ z4g8t2rrgxSTD&v2`4+DOTXnL)rj&o`*0kVDn{k^q5Tx55y*XgnVv$NzOE!@Qb3|E( z4ZjthW?nN(o9w<#ofOv__!jJ-IHauf#b=`EnRY;?x9g0D7UhxU)@=vCI#Gn?QpctT z*-zTVfuFjo*?9`Rg4qQKE?lyQkI3a*uAojr)<8cX&i`~8xUB}k{NuD%s^jhV??+s# zM_oUYPZtCL=#FKV)I$&a*TeIZV!t-4_13RtG>h}S3NN<~0+~q|C#!e)J(u&wcF+X6 z1Im=TXxeT19zVVf@Z?mS=u#W7a34aS=>lsF)F*YPHy0bVNqG~4}9EH*N)!MNIUJX{@M4VypbGeE}S_{;)(!y8qhIoZt3RV>{w^(`WQ3szSSJeePU_N9YBm zdS^IS7Zl98rL^TmS_zf%9&v)tY+*0^S%B;ZEt>STR`b|LH_9U*`%I#)|gxhAns(5LU zG8V9)a*>C*r`l3}F-p&tSW$L;=)z40e`t&olaJy{DTUp)WSm-&m`|FoKPhdgEHS{1=p^Q*HVvduoXGCqzi@KLo8-Eo_yk zC)U=;^8BAXM>X6Zy>d+3ZQ8Ng^3B@S(mG%Ui*w4NQe1U>wJf#fOq0gIP-ur#&eWkv zexwL>Z?DWMTq{_m!Dz_0&t}6R_uV4zTyO1(iM7V+%8tpu8iFK!{hU{1C}_q@ctn{M z??EV7_yq2s)Nn9{YtaXEPCDHj^4a2S;ZoCPU8K5N`|-Ct 
zm;G5XQ$`8s^UZe8d_syFdt5Q$06XLAOyNTJWTjO|@Q;R$Iu1j~Z+l^AaGToGCs|LQ z)~Kt2I{K$R7^d}38w6Jxb0WvM!ChT#X9K@~(M9PyrIk)Aw+|=dxQ$iZdc?k;IrKGo z9e6m}Uwf@BuvUOri52nyfwtaN0rL{YVka;41-yqJMbGV>but}(0OFP#(+zbO2 znE8_aAdqnIj01_M)1VDPsR`d(fqWt&{7A-21Gqm`7W~LP+ha{S-6wWk_0^(&>O_YT z@2RY)zCXmjOU$e=#>mIhOC%#UR;Wm*8cy*Jfx_}n(?$a)y8;O{^!|1Rei82(48A9! zG(W3Lz#vk-;i$>mC)rMuUA&boULWXFS6u?LO9_o5V~ZFUSIy1!FMWHCsF^LotM8}( zNjD7<>EiWf2zGH4LaqzX7CckD4zc%D8#6h+z%~&j1((D3Z5ITHtHSbpUANBh%T{~Z z7JX?C5QL}MSVOkW)9W%4sZJddnj}Uv24odqb&T(#x<6%2w#?C>&l@+aGdOTs!8|98 zvw@VB4P>MyO2iUd#MKX(y2JZ^gMLH9o2lThw0jO_vQk6q>b5MhvV0~rf4M=35NY-5 z+Q}Ij>1J3W-Tc8hNeG9(FT@lcznEXN0pitAD1}o6#PW3b?ZpN%v{qmoO|F@<5&!tNm`_J3vKex<(nv4I7 zO=Yf2z_eT5ct9tL8hAJpJ{j9=vX{vDW%x0V_y4BPJod)zCr=8Ox5*~Le#m!OBinxR z$fp0~VcL{`ho$A;yTxAMu8E7E+cf zN1{cMKn<0beRz_7sg5ok%$i4dl$6wD=j(3tZNl3?^86WVy7*Nh^uo#T^qCN|hSbeq z;Dsonq-cCat(F955%Djz2RY|BNbZ6?Y$W`P zpY>xdBkwr`l@?g~l1CI^-FnISq=ggfKJTViTsowDSRe^vKUbF~%0uXMZ>LCeV+KiN zY+0>07 zp6>~(*TU>4wNlUuNlFug;LV^D1z+ac5b~vvEn-06{)4-->gvFK(u!d_%|!SmBuK-6 z5n>eF++peVD^XN;{_nlhUj#LBeY)*US&$y-=0vN&nq3S|<9tG{J!Jz7s< zggfG#k4yu&|EIh3({Xg{Psf3?Ae6owmLymko^6>uKVjO2wKJof8elY^+Iv4|dfAExE z8YnJF@wz=y*>&|KGe!)v=7iPYNFmb8C1Lo}Ph}M9EE6yAMF~BAXY_>nqauE$mh_ee z*2_(l?#dGRy#}~IF7-v&67Z{17O*h+Efz2MvxcBTzixOvJOs}z7HP*k0>MD9^m1$| z{=ut=;gEt<+>8+roSuQNqz4?otNtwHlMkgx$M{Fa7fcZTY*Da1Hn62q0o()eaWXO9 ziK6&BXM2RgCcuK3eSOM-F9s%6cH5#w;_f`JNi5;N}_8!3>ivIe&xPEjaa#^K%6)YM^S&6#_ z@=|q7sO~cas?Yb(b0osV-aACERhEavUecPe#=ikpZ`?ys0pnw zkqlWH93G#%cb8jW`p)yuOyU3dh;S2^XNvlAr*|7t$}DKmD87sBs%1L8YMs0D!*5+{ zs=8vMYP^sjz$>A&AUcdx$Uf&9lS_q8)MldENQL(VgJI@Bk}&7BtmTZXx~pB z=i9wtzUCftP_$-lOrG4#dhAss%m3+M%y@u!TblJt^PR+mti3hS=onwS18oPJyfmH6 zBH5!X{75MmtTP_F2DEgDcfQ$`m*u~vK?gxU3A8of$b4&X|->@0)Yh& z;1R>2mQ2D1uzfavzpFoX{@os|Rs7gy&iQnF{kKP3;R`KmswyhiT>ZW137CMMCfyyY zDgTnPp{gzF=+PjrNjV=szn%<0a{Nu6i&O-U__HSefBUm8{Cm8x6_&SlDJK8$&hu;@ z5gwjnxjwCV4nbO4BdEQRH)0mt!9*|8JwO(FD=1I#c4sc)XD%{n)r-rTn_P+5>hd9K zY1LWE^W?QEx5`rmV#Fx 
z!F4;>2+dRti~e)JT2;K+Fy9vbQiKaW*W@(M#-ui?6mHxC1_rP#{W?-cjELbU`7`c6 zE-B`uOxZ`r*)2SbCXY*e>|7{J%5b_xv37?89Tur2w`#lU2!N1cuV$?>S>nOjHQjex zR|C7JEM}0HjQG7obTvu%8-8$vTxbbv9onkxG2~bl|0PBLrQUc>7*zUZSa7MqDTY67 z^c|U1s@mXD7zr;lPPS>DnQ`my;|{XtWOXMtPA^X6-K~l>+$kFFj`@<=0}A##VHK35 z&XmkAsO)X`^krx0f~F(KniFIEGV_YuXKwG>*Cc~i-VL!_n$@%+#oeVEqL;4JZ@BoTu~?7}sh&tN zQM6GYMN=rtufby*{pjKM^ylb@mD4+U7j%rB))fs~Zb+48p4kt)=l>zptdsNC5Q0hx z=`o_LO9TPAKo$VCcDnHL@zQ?hOKJnrA}``eH-M%!CqFA8{oj%wIN*j6=w1j+R+Z2G z$Y!ODy{H`wuZ5Ob442!4seb&u<%zEQ#1e#1sR(Qjcs+b(5!Goq+t_cdP=*#XB&g;H zIMsY`%_l|=XlEz~OH165%_aZP|NMeC#nBs?m*6nsL{wZMw)qJ%Ejg-0yA^^!X=*8A zn&Z&Zo&|(uL=-%H<@|w6hW&_r!vj0=j-Z zIE-t<$~+ddSld-S+BqJ6zxDRS*A?kS)#+h282^5pt(oc`oVHZ09f(Ad6HxW<9~6wX zYN8E(aJ3S;x~as}`;59j631?5FaPj-I^VE|RV_8Tp1Ba(R!GETno;Cu(~P(51cb{D zq~Kij&bjA&&@|0~j*&KaBIT}ajj*NyA z!&C81s@1nwvs_GLbdU(;iQGi1F#E5in1wjIQrj8bfeEN5K3WVEZO|JLr01O~PIHT7 z(T8E2asStv#a53en<|SdA5=~|q#+Ao^X#7b9u5tI-hXL<1z`!VX2OZJ6@E8$~^ACpISmoz^yC8R0lEW#2H8pbs@#(g&+0_Y`R!@R?&Rc<`dlLOJmWN!8s|a_D!(+j z0OHhm6HT7k$!#Hxq95R=dRJ_YrkP7B^}QITnZv0p!IKqkGf_`8g;LuabWF~niqOY=iSoQ|^!^{nz>*V!d)%mR*$&F-8Y-`w1aS?`5> zbwv23XhmEdmJ_A9*76DEvsaJ)>K*YaC@I4@yo`LmEnXM8ZHw?KNYlD%<6N@sdz60q z{eY~_S|c(5YUe_h-InRt?=(*?5XSUb2NY!{YeM;K+wD>H?NrXpPoC1x1d;%a*x=+o z7-bbfklnsKS5r2;wX-l&Kw(b3G>Lom=_IlM%M3E8>h)p^Yx}g8BKar;?hEepgS! 
zE}~^r&sK~+U-A2Oyw6J8q3Q9=w(+!OJ)t4g>csfB2B5c=m`LeLo#3k2$^hcVWJz=L zZo*#da^MOlCZ&4G!Ld3ESMN15E2Co`9lNHGmpfrSxCH>A1=8N+ATXmkmG=1Lp=3`c`iJQ{_t=1SwG%JiR%Lp7=`tSi>j!IA zj3OS1eV9`bk0Pa?E0CxgRFv-I*nxi`Xu73AgP`C~ArFR+b;RWgTQmPQFdBSuEh1yl zVop_+Xp{L$h$PQ}muo1?=Byn73a|WNsix zUBk>1*I+Mb!)+FJ0tSAJYb{u7>P}6lyzOiOlr$c!*$W+6sWFp|f|)6H62=`Y>-!-C z-Sb80=&c6s(1*Rtl|tRBrzgIQ9}gvot=sEKgnjv7_v0nvY>tvz2;To9)W(tM#(9Sj zIh#9{WDp1>!HAg&WSK>G9RUREJY}s3&+wS^W&`aLT}z9|gEaHu5Jf&7BexwOMD*w&5 zUdri7Mgn|q_O6S-C0+ZK8)>@wFT2pSFVW> zuIQ4R6uQ{P2bWNT=BX*Go+ZsV2}l_FuHb5Hzi zF@dDvvv8TE@7d+ozn;to$jQFqx&JR-@1GaPpuTmE%ak81U%D{i%!pUe7qEqDews#@ zKb^uI`n7+V6Iwr}%vomz(CxcYeVGarU{IZpOO3%S7r`9cV0!aJXG`*Q7{zr|CF`)W zA2Lf6j5vdro3<%O1*uqv>~Lp5))DqH5D0yf&+-Ox2j3gkd{hIal`Jr}zk z9(zcEADsw?D}DHvrjD`aon!yw3I7jQ@c;cACqUA5bq$>(025DUK2eyEum}5emUZn^ z1dah06!zQ?&*@$r&D-xC8-G;GiY@rNZ=!8(f<_@?Af6a+M{}dhW<)A6YCG=e`>+ z!8X-u{p3k_3i-HbpW&0Yk{|h|sYu9`K-vv0X8}n3xc{S>@^b!2=SZ*jUi zXk#VjC(j|GgnRbP2;nz}Gq(7f+RZ}*&Qtxi~6@tR7>4rOFP z^7{J+rlcskktUypq3&8Odd}Iv#SYp|m5tk3iKBX){zKK9s!qCn*eQRKvUcjJ&*9>N zowPKHWqk``5oFkeB~wIqc5Sx-l2I^%L_T2oVaWLt9KZx=_v8zAJ4+Re*~D#<+-{PEvT%8U{Zsd`5jEui zHoZmYlQlS4!D0tc-z4-~H}jEs(Kqyh0wJrY!&`bCYOyu`U#NTUsHXC^-b#FaluIPBoXEUz1OqHi|bkkc+j;ktAM7#qkrvW?Bv`p(yU^QwSy?cB!R zpH)W*M&Wp$%8tfMNr5Qi!`v~QOwk&1cr~Y7!WUtZ=zZ^ci`wf7+ zFF*O+l3znfGgN=4^9=1i?F`Vl!V_eWw$VdQr9hGIOeXjQ#Ys~aor9lbrjVVECgsR1t>Q@9dRuhFabV|uhf%nYZ&yTU3u!?CN-dOcq2`RR*Zz=&fS-<4cFS$uwMSbaU%ZFCHEltiBj^FwOx_7M zlX7O{Y6$TdOAKQ-*32bCJ(L%+moUH@0;hbkrg2{hSHV&W#*JDG(w)z@%~8X8O?`B= z$K);BlO)A2i^LKa>&Z@bg3_CDS`VAG+Wcx9lsDfqeINn?6x4;;msqT`;A9>rN8wwtncxwX9p*kQBNvFt=;&O z@YHrsKH$@^raQGCN3_(+mY__uh);C>5$W>BW(}zFUeXjzC&!5dlHY1_@@^w=@oZ}< zY}Pxel@#1-U@#gkFE8lbXtCa-M|?Snozgr&5Z)1X{;aci&rdciHn6U?ra&(Tae>_N zPk=pD^2i*n_?^#i(&X@$4IG=@@aNo0U1B3?GS9DjMW=D)%O(*N{_f~W4cAwljc`|U z#06g#@1vR0mP!qvhIJDcG8jSX!Z({X^aqoZ4-iFHafqGoRL@3TL1qfC7U zG^oDYR<|dA9Rm8@XhnjA|_8LUv8(^2`&yri#gU_GWS2<5&69=XNG@$X}GDx^kc9cgg>{ zrkxf~>O8W=4tEUS*fYjh*fbGV-j@4l3^~l5=DyE4CE5OKVTjZkjAX^Ami1}qwH=#9 
zERU3wV}7jGQJS{TE-i;4uRnHSiQ+|%4ZGLV*FknQYVzLnB9?}7_h6KM#`Tgemk$-1 zUpQi05CmR$fBgkHD8rUy)qqC( z89*~d+heg4g^l7ZYu=ycn99 zvIRO8U8_cHl~ZUQ{gBE!b9duyEpAS~E+Kv%$72}ivyQtVjbHQKeDY5*l5VD`99Gl^r3^$NL-Q+8z(3z)0xi^8g8Q z$5aJMYa^JppSbZpbN}5(BDMcBpwqmS*kJwOhY#~n_!6Rmc!!CpV%MRe{GkCid(PWr63RBfXD5_rG?Q0cc^!jdb0JDib#{Ois!~ z*RcLtT=hbu5QlZA_N{_)2!dJ1v4l%HZq=mGL6==8qiE5|ufJ)k=WCCeKR0_6>68_b z9m+r}x;Pfg&ov;ol#=c}f~A(kn+U7JNQOSVl$e-}G)C47xXdyR7k_0N;KzOE!=Ez~ z?@=zV`}`T*kdAsCHL||OMQr32HB>EfAB9=4ca;UU_ggxM;_x60dP30xN<8eCw*R25 zO&iPF4MDKg@7UFZSOAc}YpmPOlAI5Gx5l6F{qXtFV*qZ-ARpg}toAD65AKjJagN+n zwdV{&<3Z>iBOSlvP&g)3cr!6pJgimeX7xFwec&RvA}&q=cT5mA=;C=zkQO!F+D^8omCW}a(NEkk3QE|jh9261= zDRYAmXQW<7FGqamBbShOI&O8Zm8)L(o9m`*aq4u|BR!YGtGpceB1obGs;BE*0K1Ry z4KIUE0NM0S6WLnb!1FD9b3D-@95MtRJB{?N)atOXOl-R(zyET*oGH6awb^ekj~-rj z?bKukVy-f#7+ zmDjf8%-pC?u=pz5tyxGW&xXfJ@vYZbkQ9+sFuTxyKW=s=y*DwFXYA$eoru~#JwaH) zZbPAm@&%Nk9?8M+!PrCa(CvgD3Wm&5&VBTO3Mv{FTJ{g$pBqw9&xb%#LR3rjtqC%V z{ZK1k16R9NWX7a@svGqMZ#&G{5M4X&dufOP66WzsCv|SHw@{oVmDKR5(p0Si?ZgXs zwt45DFXS>qvzBi&qCSP{SLk-m8yy-z{hD<`b#hfcb93)DdTLa;j<%hxb^N>Q^m(LQ8w7Z#Fd9@~Wc ztaT_tW&`8PV1z7qeAWPBj!$xP0Q;ZK8itIxX7yzZ=~X9r$_tARdrG8dMh%(AOCtpm zmd)WAOloNtzkUx*_-(jX+sh~ZCtUj@&ItWF~>LT(cwEz?WmYo^q=IZGJ(Qcm#gT9;Kys>CBHtEPkV9w_5;bBoX%toC+Ka! 
zsTDM7-qwHW0HGXv6Spm%#O=U3B&{pQ3bfVt-7A94yHbMfSNu=rtF{XomEN~>Y`1PNhuK~ZVk%vGykucle!Y~=ff-8;z- zcHPu*#V=kP1j@ZL_=dEnEdywcGN{#0m8<8=zAO`xTM`#BZ|#+;_%+VJv~OosU;QNx zUuh@iog^}h3j$E~e~e@z=7_NPj+d0(+o$W{coIfVK*OfIqP~5^n*fw7 z3H+L_-B9H*p;8zgM_s;VT41B~wH`vPax`v_*jXIgmXNym>WI?~{3&&Pw%Cw|QGl;z zIIw_r!nc2MCBJ(4Vc$Ysne;31f;C&En-BfEd#Drabp1^ME{|@Euke1Qc4HB##CSZe zv>Eu20r;06VTA>f_5aN;2V|-MX&MixZkf@Rxs!x|*S+8Q;9>OPfbC66rD8=#=MXX^ zU=yCif2Y=5QzHp;7K;2cO=Na1(jP>}`7x|; zzP}$_e-Mbi+0d5abz~FN#!HfwhuU-DXBoulk$|#NV9$%-C9DXMz$3a&!*+N!8v+b$K}v6+y- z=(Dh#%{>fI(RmjLwd*b40O(Lop|pW93U2g65wWfHd5xS+Fj*`UY?11~gm-Zju5D|> z>Amozu7>u*`iG}xL^z~cXqH&u`C-NBCa)qJn#fuAdp4#Pm#Cfd3N9FZ;IaYoCoTLvd4B=VxEM2qrDrtd8i{YXumn_9r>CQ>*0;VjNG`lN_rQ_lwgCFWt#QBoQ^+G-%QJBu z04yZ(4wo^U^6SviDIco%1m>w|pn`m$R~Bzt&kY&uCLtlS0$jlXt93hiT=c6m7mmkd zX9zZax%HjzFGoKGfq9#%d#{$&E4gh~L(`udD2YJqmIWh)eJPuNEBCrA>eTqqB#PCl zZz72GAzICU=Fcctlp}-5wn2lh?t7c}5JIc2avxaDCT6CO+IFVXh#`Y3YV=OaFkLXc zF{qBEwop6y8EyJW6?W~$E&uFWo!<}|{V;Nn&8vZX_M-e+_Ew@$x&+a_JKLDSEg3Sf z&U}-ZR@GKNT6)=^($tXu84BasIMiICnFQ1^@xwnU*E4i<2+y52&0W(vvk5>4$(R0X zNtMfMAJ^WUBSL>f?pPWv%s#AcW~oio;72P13$TEZLXu?;cV7FNhcA0F7pUn(f_KOM z+~>XLq?1f=SE>?b>{X+UY+w3@lB$TjmZ0JAfXUNslf?}!{gI=hth`^)>e)ljF(NguD?Qk z=X2Yhe7c6=Ow2cI!zUe`j!zbGTSIq0t||EX7l$9>lHEiiGG;H&237Cr5OJjx3x!<< zIt_n4e8RV6`AZ35<%%Pu^7Wn)Jrql9qc~NrP377wg6OuqTSW$@0%c0>di%N#CW`%S zQkU<3=d%+^H&`6Z)Do}?Vuj_ompiU$Z^6nJL8yUhLuz*st~hdyN&=6Uv`P?g67pr{ zvCm5xN1ETv=&zV<=f>b~Z|=Cuf_#(=$ia@eH~ZD(>=bf!#4nW&w9k=O>_WhUEFT(2 zS)8)}+fD@0ihKTmf+0gpOdr2__&D| zM~`3fOwZ)>_+q)O_YyjO=k$eUNSBnl-l)_?XLijl2h`LVe~E2jZ^_FXZ1j{bVkI*t zS53z`x~*M-gc=4=_DRmYv@zWFt+xpQ*`Q>iRKyQ%aSEYK%b;V=7yNrn8-G^}bXer| zmE_lSNXSz+R(Q-ciV zQrc;&xej@qR6DEH03Ino%XT>j4Zuwd@p~xhsuBC0|#Dh~8l-4cW z$qnNbGK$v37Xw$7y41W!<#&HUC}=WKbGCy)TQz=Hev6Wuxx<%dyc~YEMFTC9KOMSG zEk&hLRtjqE$FOrUR06{qCr=TmP_?9kEIXz!{t7M2%H&7YVBrDu?c%}u-`d>l@TFCo z;!9~MzJ%nVJFYBCXje6l)oY{G3r~{8<-^`Bqfrr_mUAjx=$xo=0r=&L`U=WS=N3?CNZ*P6L zG~tFORuHR4wXX;g|ozdj&u2wdxs3US$MkeB{@J-f_hkJC=8ezS-IV01U<3f<+`{ 
zW$azGZe^Amxfq{5W!~0hzTxO4Rx*SrHqTz3jt{QZxm`2X|L%FS*})N}dOZZwWr(Vb zdy|;KNH928Sugogww?VA2_rOqg+RxAA-`4NDlha$Wwyi166mgW|re zun6DcIu>{4>VnxLo(KEz4*jaK;AuHY`T!}&zv0xKG>(KsO&NG3CM@{j#Knt7R!bDs zp;+3RypClA=a4M?NZsbjXbL-&Madlkor_UXzQ?@Dm9+%Vf*k-UL`GQ;9lVtKui5;$_k z!c@3RZdyVsGq2#*ChBqjClp8qc!j!$#_qFyPkx|zy5nFAaK}XlkdjMSp7_lUDYOM+ zHQ)AWi5?@HR_5Vo>e1W|o~-_YN;EVV^WSic-q6Yz-Jg%;XqhN8`%s zcYdh4^>S+Q<`fb1?xQ*}H<{MZFlZ5mF$L&PXMR_oMZ(zWw^+RimuvJ9#Sm^o?+ZG~ z@%+1jab5SUju!}-kk<}NsZ}B#UOscn;*kRtUSF-vc(5Q>KV)9j#f5EE?V2*oSe@;E z)rbkzshsJg2+7qvvIS#`RwC$Te_Qrj;S|`>oed0ND`fzMLjFMn*1xOxBsKFppL>k} zE$kJ6|_^FBh|8(_0M z!UC6HYX+!hc=08`1%w1bfs7GL=y5ZPzaE|ZIADH#GaF7;vk%I)P^O;!3NOM6YRKEd zD^wKIE)kv4Pyr1iU4f`qS@7e-3NH(7I46h}CG~@Yt_A_} zDE;wq;wZ8-d3^r8Ju1V1tXU$kgeuCA%|!|2tW;J)Cm9VsMH3BCF7S(UmI2+8g14Dw$U$k`U34Xb{ss>h1uG>~z>*b%-4USnl{l?&JT&WGs| ziylzRTzAak?C(y_q{CBQ8zpRsUKo8f(*YK+ACf&mUwh!bU?8r<`Bbyeb;gJ4n zVT4_XF!QM7>bMF}vSi4-zJZN|Pou@>7w^Sb_ehA%^SAnQB*mP4 zCGNzGHofPFM2MRVzb;lRl6U`ol`nL~;XmBu|Hg-Z@z9cbb$*z8wfqb-$J?0!vC)=u z#BxMhK`VF3D_ufTBq)}Z)Z+g^d*%^7#p~l>cnDr9^Z!(^Dxc zF))6jRW(H_utU_!)jQEyp)q`Kas!i%S+^{2KZe&Jx%Cj-(>N12l4%saHiWNc}aPd|MyV z;?(QcHkfAtWO`g|5aULvhvDfHkF-O3bt!u8zA}q!f%(NY3O^{qE+)rU^Y9tvJKwdE z7e;0~t^HB5)2|+uj=jS8C*TCN5w{WXpZDxBP_xRABt0AIL_UN6&Y8HHM1woRSeff& zqnGc#8tqd3k%!L`cLdnMFE~a&F~Uuq+ZU^3;h^kasf5DiFPrJr+NL+r3kzR%1j;*B zqxrdX`LYn6X%X74YW@{kYxkjN;hM~~g44aT2+2but$PEdqIk_qW1rqY=BG?If6nb-<{hD=y}Y% z?m4T~vZz$STy5@J6G^Ls+O!CqxhI8+6U*JtNm`>i_S*PWtjkQ?keHpSeSCOw>43^z zti6-W%4G%i6j#%4x_N|{)!dj4O4hgqJJLsniN*DZrUVn@%V==BcUfoO3YxW9u7W*U zHq|Wy>f{$e)sqR2hQruCp9GCd?`N1=&@&sDYt&Vo>=A#s^o5u1wLbY(DT73^y&JCe zP^*!#JBMqyu@Dg_SVqri3{2KD3e-#+XR4@q*FQ)Pt$z0fC3|u>N@Z%$-rr2pibrLj zI%>jrmV<@Uf34vt8w~&av<}u@?9O&z@RnrKC*O-VmRl~K5rN)&>%f(exJ~~+JRfCNwRd9L)mB%5(d*?CHgEi2Ka7&9a(G=tY)`lQ!5dvNPKx~ z)^T}15M(duz%UrqmY4hN_=_6SUG9UqRet97m1JKZP^X{U2lM>gPx3ob>30G}%o!&;GBp5Tzg)1_e(slJn-?ax4 z5a7)t!*d7S=1>DOirahiItU4ue!oNB{zuJ%3pT{0*!aAx`rEp%j6%bv2G4$Nf)Hxs 
zdv~xO5bv{5=V5mFl`NgT#qo!#VJ<;8V&mzO@UBZ8^rYMTnv!WNMnSJ2H!BsUt--G8t)`lfC(LU! zCG=mSq52rCj`TSTgP;x;s1%tp$;{-j4JJp!mlcOfjUMh`$$1xQjr@z+NH?p`5Ejw| z(x8vE4bm27x9t^Udk(i$-e;s;^~G?ipH%ACKAN7_Uxjroc~Twi{JZcEIgg>fy{_=V z5gLM5lF&o9r-8K690L>KRpEpA!b;cfcrfZz9y}uE-!HbA9y!Sv{KRwf3)%)j`49Pn zx6_WBnWZ#EZAGk}F122E9fY5(ncEEd0RWd1Z>#T|*Y5CxLg5UX#qpKk2l)j9Ix;)3w}GOcR_ z$Z8X=I$LRLTe_!zV0L3|0%j?y} zSlsEKUEJ)!yt?KUiol|1SQT8)Ah<587#1mn!(6^xG1}3jCm9Xfg>H z<&GSfnKP0dN{1>Q_A7dhUhku4({Rf*79Qmn(eSNRI1TFH9-xw7`Hv#8;u>!xWT1NN zR&TgEwwS&e)0p2pz8sF7vANM^{oCNCV@yo^ewMxW24thd*XQ;_RqjY@n?${(RZCR8 z#&^EUm-*Iem^mTggT$Bo&n-$;+h#X4>?5mbs)gx+scqCKygc^?Lf8gK|0yRg2e;FM zm}5~0-nRDP0h#6nu>TzE8tESjP8~-gTwaZ~S~1re?#SNcc(L;$XN1 zmRG_l$Xv)KsM43Pd;4u4=q@&~OBpKinPoxd=LY3#9>5j zI%ell!sCF+#rgWz+qnv9lV7u=+>&c#8*K73K@nqZGM(*P&OR$at4He-jIMXf?{k7? z6xYh`&Q==HiUV=|@91xa*|@*`3R&0*)poe~_&!!m@^`eJOr|5?Up34W0Crehu3R z(}^>bKR%kemkuh_2L;s#4b!S8C+bDU((6e`ococAf%|?aL8tg>p6DuXCZ>8JoR+KS zt~Nb?T@}5qy0VgI8L&6`*M^1;mQq;Uh;Pe^Th$7WJL(70y@BAxcOv)x9&_>_4DKu4 zM6|d_0 zh;_awy$E}2hQzY8bVnJjc2&$4iSkb=*^+mGTFHQg5O{TRFj`_8wsdeqeTop1VQ7#545 z#p1nYY`WB$q9R!30CIa?ol&N)=C!=XUd>y07k+nJ4U8jC(2hA_#b__L(s-Bcn;0h( zA!uDz-VJ2g$yJ4(eeYbYEQ`2l6f3S$4cP-5Sh~X$fsJ=1bFsgE*4){QVe3FLDpn7M z&N<_DL;(Sx!f(9mK4bn*R)1}AZx|1R=|DkXr3+7o79t`tA_R1^3BpwDnF7;} zn+noZtC#JX*otZ-236&aS6a`U4QO4`Inz=%K{Q)pC&=lMMCkN~$M%W*Z)DGg|N3lZ zn2-&UK7nl@GrV8Q$9KKBUzxe7jE>zENLteclIoWi2b(iiIheCa{%0R8^TnjJy{a*1 zr9$t|vXQ=)pI#$bhpsPr%^S{&L_9#>{-MPbKW@l1x*ATj7ZAw!Ii9=;@^L^c;0Xxm zK>{sw`+?-3D=XMK(!O_*?e%`Ae`@^?UZkFz2J6zNQjI0zAB HbR&_8{Z%!Rt@Q( zw88e){K8SGdEh~?uD=q=9vyZRUOv!aZkFjJ)wzDzJd+P|!M`nQs~0WF4+6}-^*NP# z9>paioy`b}rV-HG@P3c!@cDcshVpn&H5sQQK;<4!Pk8!ZYyi6tq6{se*HpsUFM8%p zT!NIr2O3+xg{{cvZxHBT%Dq}mOVSgRN$${4vVx^#-4v=YAC~!xv#LY3s-}~jr6>)= zR7eVgq}Bbo;CHt6)mc~sG3U0+h`Z)5jU9fd=|#H-+^oIO>0ANci#}fdT%_?V8=X+i z#G&pPARA(uE;zYX4|p2>Co9RvZe9afy&I#tg>fwi|$Iz z((&v0dtMeP?5(UY;-e}S|1U^jnvS;Kb<7+Znp6^=NDz+aMhFg+)OsGt@SDlIvui#fKcCleANYGY@7t8 
z6W>iQGcPI_sT+CK-HPixJl-GiGSA?C=X07Gd0)2!DaQ5)BH20c%=Nvo*KQw1o(+z> z*y{e`Tm93oW7652^U0h)`FwysItCj6K{sx2C@Z^P>|t)=ne zc^UQKwow0%q414L(MB-X%V8jVES8*z&*c}iZeAF0MFMdQ1+Vq>!}G&n+00N{TUn;Z znQ`eE+nyrAJc??|1e+L{igYLr(6Gl1DR>6t{WpBG_{?)%vzi4=R1^8u0eWQkfrIi& zv5VgZ9#T#zZ+kElqAr3lk_5)G@Fc6K?ywhIqYkQ%*@b&Ct(Yn=-F1Nq_u<=mEz-~m z#dy|<2pHL0T|Kh`WZyfKlH>sFKUH3`i(e@rKOZTAcZQAtUk0OH~!| zgo!!*`E7E<)kI_zoSM{8K?K^d!J6i;I0fOLHeTg}k5Tw`$=XK)uIHi#46v>%v&doL z93s6RkWwwx<=|Jd@?Tug3+TB&9HEiI3R1UFtP;jGFf8q#Gv$>uMS`gq$4zN(pxGD< zg#)BwlmVxr*CXvN3EG6O+O`U)yA(e?wig+_Zs!wUC>CE)A)M1nh4Np{b}!lwRi2ag z=E(%{UYTreYYj5pCU6{6cY4?nb29s=Iko~cEzG{EtjTW`2L?F@Yrwv zjqnQnm#%yQ|6_&2|KztW7F=NnsPH&%lWk%Q{vneUlnBUrp849Dy?&y#_&UA6pVmM| z_y55FIO;`*(JGh5{cMb^3?opgZ}M)ps1_58NkYY`W>GRtoD(1e(|ww!RNBP42-)e| z9WQBDfd@T`K1r{W$bk;o`TvN97Zvd)idI+BU%taIKc+6R?lcweU=zV#JpCjUH#hTt zYMWo-&52)l*DyS0ertha9(YH^DdyZCiRT)jOXMQ&^G@Dm=D}q^4Dn;nZI$9IV<~W+ zXd1=>E##(tre5EX^tMK}nUnXO?_tb}s(dG2flycqU^cti5lQ$8-$Jf7T*p+*e{^2U zjR!rMoce{qg{k)L}-WsE7$^YG;{ux zjZ5P&XD@}AKj%yh4xuY49p!i}hQ>7rzi@ol`+uu_LxAqKT_Rj1L*Y|RSFsT#p3s_a z$xlCe`a~?7Ost#GpCRGZFE2JnKbT<#^%M=bC@D(F&dRG^X=x- z1H-`WX4%DvYo$&FPxB=|_xxNpH29&@9Ww48pA|>1j!0i`Aldj5=%J{GqQ|WG_3UW( z#aya{tllr1wu3aq29}lH;XAxHw(c__g7;%d#N+0HNgZe9EyGy7CG5a70e#;- zuPauH&|Cy=>v}@ZX~MKTXYucv+3`5Rb_SwBlXO4dF`JiAvDENM0SM^{c$1DDaQOIz zm8H0^zp<#XuC#d)YO$0vMr!Ii{1cv30|8Kt%#ewd9#hb&H^EOMW{>0pkzbC zSfSKn1cAs4vtN-AiT$bdm+D>IjFH`qTi$suYH#-}lrHR~ zQ$BYuru-SiT6}9F=(s|MvgjcZ`A|y%Z6BAmo$TIK-|K!kTDFQihvLtqNcR^DTeZWI z*$-y}g^~tB>L+FTDZu$_`{6vqFR-U0X7*C_M^q3|soY|G+?(=#WWbodH0lg^%9eoS zoFSI(3~2fkd@WiMb;-xP+uQmqe4;2>1oqO-i~UlVUI6jNcvrOZPzap-x=*Ejzb@^| znXQwvv{j1|n<%LdRR~-krlPAjBzS^^I$xNEaL)zm*y8ysj#5j=wz>T0$n{rQXYF>zFQ7)L@G0*zCnK~TdAYygJ-xf)F-y;sA z-E>wnaolxage&?$tSNDg?63vlWZJpmy@-fi88CG=K%yAl9}GTRiEMsydzrWI6ocQ& zS`QQpC5s;xbr}Jjr-c@yJPp1J`BndGZr8Q{m+tkif0MlmU%33#Rco^!pkBGt>H0zI z?RDjM8So^B#)p91&N25&Tim^-D?#9o3a-yGI>iH$Wa0I`No}2Z`ED#dkkjk6@Zs-# zdbw4(KDVgTlB-**dn*t6ioDlG1{1ns6H}e4PX$lCQp&HP5tw0b%Sv3r(lhz{i5U~f 
z+NNs^MSuhFC=c?U-y2z2aAAlNEF~=++Qqdso9b<| z8+W{ks{|}?>=!r$s`xw}*wW01z}z8z+VLp!rC}QEqk1faE<<^@W>%0DX8tiBPl7+8 z1=V95$_9uhjV{&nDpl`ZPiEwY(O*^8r$mj?vz7;=ZGynG*1)PP&fm_oa>BOb>CFqc z8*=J(mssH~i&D&R(;?qnItUu0^}cl$Oj|Qf;8p8Y%Ds6LRb@a$HeI|hs(G14x>xjO z(=Yo`vMj^F7gKCEde5wRQKB-lu9Iv{5$(<<)>+}Jyh7za$)Rc>H$&8?AAXG2G{;Ne zi>NH0rB}V&orgA{B<(_-26I<~(KQta#IIah@1^tWulAxJZVrF@Tc&hT&&Ei+-#I{Q zcwb)~r|7v{!w&Gx6D71ImlMz#8NKgbqjP8;nyc1Im$XD1Or}}(Ae4I{@GS$=@rOEyzH{gn)Bxih zO}!+2*^XRiWn}`kbB}xN5hLFi>VVF&t^wGm)Xu{svH`r>D!{Q;a*A%02ga<5x9XN% z@0vs2#`U?B_XXTPMdc5ZPS*2~jYmaDf@z!P1;jzH7R~Z!|3`G1}hgigEod5|$4Br#{4vUJjP;q~ydVtPqk;naZT?Wxmwkb(}Mb zc{>ZFM~T*(YL4o@x_@hG%Ec7r6f3-LHlLuRH#kqKh(BGreLuFYzFd{KqYw1cY_d^o z&eB%0yGz2a0t*p$)U(WQD2d&2w?j#j#hzl2X+$X;4mO6MS*!LUM@1U$*o6fLh8;p7 z+r}lt;#;picW1u>+tzX1EmTswd;?tzNZxR!0^Vs@Ym7CLCYFsWTC7a!pe<8BFP(A> znzXK6>>n33K@yZ}d$V2AwqOy>%YpF6NnTq3vPVCl{F01>mZer2U=Z~tx-)W4>fL!C zkya*{oJ%XuA4OTKLy{XkhV7hSJ2*gGG{APT+}~flAIE#{rg}SV&G4E)s9;BJ#>W7b zaY@&q!222h_SC_KVG7c}1OrN1E!X+dPeKIX8beUQ%%F!Vx%h#|uAf^1mMWj4RRe{@ z2cmQcZk_LPZ&$Bq%6D&jXDeinpjc5}*YdIg1C47HyFdv)z8yO2Siup0n)P+}&DgnT z(oxbu@NqKQ+BjKA(h{0oLjXLATj^@MDQC>ZYk@#^`L5?&-5cAV z)V|lgEvtCA9zmHy_b`)~zN`cA@6Vhy&A#)gmrn`Wj_)mQe!{X&DF`t2?D%8Z`dc#l zWzF%XdP4bSUuV!}e&;*Y&8&W{nH?&v94D$SW)p7+%D1rN1h}odt`d3)D4BH5m$0+iXwB7~Wm{Vr&P8E&S&jDG zssyt#=qO82Z{NWV%uD{WOe-m4%%A2{H85?Pfu)nJX z^aCWda_Bn|(b8I5Odt3}f#*UC`%a^;cvRIo|Y5S@5e4^=dTQ4hYu*m~F!`fyk zkE)?GikRuCB1s=leT~%;*{Zk#9s!sck*1AKpigH&Pi^)e;riH@NL5X;kLP zLzVY`Zjz%C$&!ovMVFv3&LeW|V1J)vzUzRbK+mdy)5TRe3(NMM0`GDvY))Q_fNi~eiLm44>teDW!C(qfPLHZWlLbt`=v4HQ@!Md>%q zbH~pE#)OmK`G%3^WbbjdFo28x_W-#tfcy4M8(H?y&9e z)9TUI|BK7ee=*EZBi$LY-^z8EgnQbupT6U#+^ksXP`7GoB4}yC(|`y^M3CwOf21de zcXQsP9Z-QFAyqGd+U*?xGr1=Ne1Sn%E{xQX)fWo$VMhno0DH?UA-G z4J=7G+$NwDJLU9Xmy2)>s%YP2^+jZ$phxRpFI4vxEr&;a=gat1)|Yuy>08uRxU3EI z5z?Eg4|X#T=04f_4>s#vFB#W&4kx~|E1;0t%DY^D9(OLlW)5v`toKRaG(x|y+4IgT z6_gMriu>H+R4}#X?PP7+rOazFEsjFu@Rig8tT*kmc6E7beA3{1sXL235T)aPKx&R;`eqwU}?5H*6S$WP~5wcU)Ee zD)5PJR09N| z_e0AWuRq{Yur4epra~K92wc 
z`g@n#CXI_vqd1!AYOJ-6NHRNgt{!CrP|5mfjUTQK!0vvNCfLfL{pB$8dno{Y`{mO! z)ez{eB10^G$6>fwN-<&t1u;iI9QnPOj4B8}#3G7f-5ia_UoN6}#Zj8Kp^(6#21Bj> z`x6Njl#cG$xw3l3d8xCO!P1RCirwYRrOt`uB_pr7NH=(wi4Y2l@yVDX{3dFk-l2py zr%6x%s+HbR?VY4wet@>`4MK)2hYV~?)Tgkgs*MX=(#S4j^!y|sDaa*Ac>91M_Xi`aa#=($#K^qE^E*a9B0C|JMdG zGqA=b=bX|B$mgE(=Q~guixM9;4UFe_4n4UQOFRR3hELjk%!&0Lzk!2i8hslvCO)Gq z0rDqyhZlF&kcNE`mfLfk%m^T~(kZ1$4rqws=6G_QoWER_e@{&3#L{-xdQk_dHI~c=CLN@Z+%7KCw!;WEkn`6%M!sTWu53k ziv5j)Lwn?kPV)=GP;xF{q|;XBwne0wzbs(%K&C2HTQ2n%I>`MaAiqowgu)12uFAAlk# zLK&GNv-*xtcd0)=`4{jjwDm%|Vs1c^QpeU4kb+xF18tWA*Lav)o*O zrYsePVd9qZt~V4lLICtTIPC&>+iW~&p>28kPW(aO%y&MA2<5_eLplwu9y>9$ZW>Ex z$b=hQd4(yAmPn$Xgu;j$cyQvLpvvdDka)yZPcM4OfS@G*L0vDz;^MWuugD^HdDHFU zsLQGjHhp{1_#xE^Hb%ALP26@2^-x$>=Co_^sK?5GVeY-dno9e1e`XxVv5SZ(%@Gtd zH0jbuArvuS2qCn~dA|33fAB*4E2Db;L5kYtacDGmY3b^%=>Dp< z5e8W`Oi<69hc~VJM4&KacGk&zxxhnu==$->_wSYiFMO&P9ykyq{o&j_)i^c7URD#S zKkrDIV4K-PLywNXv-7UAm(yYOnKNYDqUPs;t4FslX=8LAZ~Dbu63OE zlu-RXI#V@gy>J&TbPSfyJ% zilht~7QiHLT!}+0H^w9gH18Au(!*~7YpL zSag2wN|LG^;qt@a9*%A`ee~OKGOjF9b@>gekScXph>tTc zQV&lKf|mxocw~f|_N1=x!fFmw6C3Kw0Jma5kN3dn29b5#a)5j_a7za5pe`LOxXXwU z$!StVAgk8nnv}?=3!Pz_*M=N|wbG4rSc^VnMKo0~nj$hJ*z5c}%h8lq(G2FUw6^t$fpi{4no4M)ZgABC(UN9Nsp> zt=XIs2x&;Ra{K6HU~U?;ueC<#l$Z;pYIAtRhl{~5GjvluFWFib&ccF>^eqDafn zE#Bnc(G&93<6_!gahizA0)}H-R7Qzk+gxxYTl~NqPq>OQ+0^WJ=~~*28brGU-OlP( z3I4H}Huic;AY*hURAL{$-mlfS zVO;IOwRqz8hNk+;Uwm}wSA{RE`b0`XT3X&3uh8((@w}LT{h@r0i=`5vBc^eob}dJb z8MkuRc>nZ8YQyKh_+;zMT!TXa?^Al6(qymZdN0vlV}mpxqioYjO28V>d;p;%Q^VXU z0Lu!2tSTl-Mv`wkyOI0GGlpoEUujV8ScyM`Pzw%s*$N&7ho(r!T>GeL_sQPQDZ`kZ zdFsNw)vDjBA1NTgMnJ<(CAEerkf@nI;5N#>Ocj!>K8J+jH%D28XqNnW73_nx9;qsc zUK`A$bWq})DlGKekXE_T1LVgy_RV>C=hWm5{3zJ5yf#FPs{wty14}MY|Dvd6yb8KI z*!}qR+aLb)p#^KBLo>1}q=~h8z}s9+YH>EJ%X=AVy`!P&SIZ9xfRD|HrAOCP{u>QI z|-cL6yAAtSRpa>EM$Y6Pe!_rUTbpa&QT?e=4+Iz0v zN!T>!g?Ng|`_=`=k`MEjfi^Z>F+D^SxDL%>umKwFT@($WBL45BuVtC~Op5Kc_Nts| z3HY<1b@_VhX;egrW2}oM<47ipw;a4eR)m*(qj|XBM){*hSvOzwb#Dy{R5Ozam#Fv7 
z{WuIT|4}0j5`0P5fMLsosu_9W@fgD@N(l>Hh)$S06H`9=4;SAr{ohmQogte72O52D zeM~oS>@ieOSXosFcyWF*JOyvM-Q6uIwF@LF86VNPR@@4M}^(jeGU;j!omk_itekb2-^X^ZP~7MTcS-?KS#W<)#l= zPhmhrt-WSH*c9MXt!&4_9cO>PWAqziVU%vs^f-$KT@3zPK4(a)-v`+M$e^XZES|}~ zmJ>A2TRg|_zPPzfpozSvZ%3?lqa4E{-Gq{lUSoDe)7 zU^xnj=*#0)=u)~V9!`;&I_T)0mC1U&KU;t4V~G$;GMLX6?>BezFn+qA@W^rhB4P4~ zAw<-lXFL&14o2m@Jcc={L^YaUYM+#--!~$5c=W!V5ZK!dZ9#U~`QTc3%8K z%3plW1vC|DeHbs|*(4w|uF15>Yte@=6HYDKE07ykYc5XNvFF+d{$R=R(bSMeCNrm5 zRe1dTZo-wf`$mS0Qm^MARt{Se| zj_9{({SD&%%t;6QLNhts3%E7?X3+^vU-lpKP;OXF_uzLVTfax3%c*#yKJ-{$Yc1)N z+{b%Uwj82RNkR&^=-h?wnm+~~6QXs__FWLW{VM(G+c`@pCS1}{6k4_3Q39R!x5bb2 zv#_NxSt}vuGv7q$I>#Z z4(eR?1B4ki<7Oz1UZXvBMJgdH!3m2hFCRn@H{h4|TtqSM(m|hheD{e69}(Mn?Z-Vg zByLNqQh%-+qsUdd4SLsr%uQc3nw$*qrMMJgvY?XIE8+YcBxz^t&G_XkRH>Vx_(ANt zrRI2%Wx{}L94uINea_74pq))AXnolAyWN)l-9eEHn5pszK+_sJ6*@+20GjnmC%0|F zf{B>h5hgeKdo9^(Hz5z2e=M<6X~h~A`c1Mg=GK;PoUd-X3-7cZy=>S%S&RfM&jIIQ z>xXtiq-WI@3c{~2)+hr%j#4`tSKU%%8)4*}Q=Bj^3D^@atC%!%@$XkZoI|`Q>eO-X z;~4hEE>!{me_e7#tFl%6N+~K(X$fVl0!qH9s{aR;``taJl-`(XTWezgf0u z@0Rk-l$96w+>-b@e_pdVx^msGMMOAiNRd*~7R}*leW5U=z6Qz4bZXI9%STZV?N5$g zQMbItNt|tAw&Y#;OZJWuEi4@V7J3LTHSQe`bhQ87_9@wWprGq!-o zn?`8xbF6ALq?|`6ID8-?MeAstnKb3iLst_zykUr10zqmHsWmzu+Q5Tl@Dmw5&{2MV z^`$5+Z_|L@KN!JeU!mr#Iu_7L7jXJH8m*-xS$EfKO~c}m zhsRFY$zNCpgl6rhP0EhFn8X~(jlNVqDVA4_+D*pP0iDLyVwm}gud8nq<)DgpTSPpH zZisqYCWE8PdA7t1x?^8Js%6=PY2*SjSmf&6B8F6OoZWP51}_X|2wIWMVk89^HP(b4 zn{qp8M_RTR0?lhr`mZUs{%%i5PM(65g1|{Js6$MOSaa!8xdK2b}kDCf)8E5Z3uHltqs{3*Gm~MyB1Y4scP(hyT9#S>f0~@EYA#mt~(M6P0 zTgF5}Rk4qO83u7vF*;sGCa`BKq@J9%gRSyH6*6;j@@?^NJ3bFqOc?aII#{U(ju!Q+ zFTJ_+W8dG^wf}s3GvKZleXFzTw&-A(rxTu++kJxRTgjWU55T_grtEmy0&mKGO7?q& z*b7NjU!7qN{++`5YFo`StzV2EK| zlBrS|e!Geyq2*mfK76@`AkYAN>W?(G^jl9O3y*4X&iSOM1 zA(g`4ohSd3yZ=DLz~>g~!Al@Fu4z9nO3eL~H*brtuCkAv`a<+s)Ck>V2>1w^VOQB~ zCZ}={W{bSG{cXZqg_I5<5|f?9T_eAL$f&W-HAQ8*bhrf~?I?E8Gq)_XCK>g%+53in zknLoOtA(+kj?6x&#UmaiVp7gezH=WkyPXM-w;bEzY2Ou+yF%gxj^LoTR%3xYgSuR1 zS4f;w1pfW^Zod70h7%$)+^^Gg)X0uL*IK%Xd1NwE76JlVFKo2d^#esL)bp3H`Ac&K 
zjF5LLUCkgJfc`xn?%5X(MKd`t=Vlo^@|hym<*Z$JjfISb_0v^nN&m5h-1uMv1|BP! z>N?W9BC5TLDG1X?482EQ#V=grBwfmXUNX{(RNz!=VgTTv-chFo=xPy{E zGH^?|5BX}HpIGC|wV-NXI~}of#E4`KZLlKC+QZ7g#G!SWQd_)W2}%t#SbApnK%#t7 zTxAx#8&mmg>c>&9an(x3oo@*{T9gp&6dxOR|H*ePyLU$0@O4=LqM7UH`J!YGnEQ{f zChEVV)g4$=3CZM!LM8#eRuCh7{U?+0PQmD@lMKPjCiZBod?)!j_w~C!E+cI=FTRxl zt(g1RUM#uT;aj}0c>afNbc59-5o2RZZ2;vM;zB2idRj{k0Fq<)B646%qvpD(t{5>^ZtUvM^DLE@4~t%57iUb zD&@w-@(YSyRYD6fLCY!X229by?J7ar?ahRSwcN@a^ODe_q7OaTFxEO!&+}K+fmEFM_T4E8uhGa>%rvZAB-v zT8{0oqPAf00#A+)XF6m|srrLug}pGHrh?R;i@k0s(H|wWh%ez6>pPc`;Lh?uI0-}t zpImdk^21i%H@!DuF?Wml>a=*tXDp70>l>QiXvTN|-B;Dh_}WozNRqomjvETwF+cAB zeaOG?0<-a5r_VJrkm=m8ouDU8Fu{kcW<5a+>f|6eWQ8ihD+bNUruWoR&xf8(SQ8_t zW#0e1@x@%Z4##$^E_(2g7v=5_*qrjlm?JZ$+!l0-@2AMTVg;E*PHVi)Q?t_vd;h-L zrGRUZC(_@6VSs~zl|vGOf)$hkEQPwy-TymG$bVh|(gT-R!X%+!ZJ5uARX&|2hHQWC zA!AC(e|>j-W!Tvj81H;F|JJ}4v*_90CrAAen`H3<5Ysl(l>EFyF*a<9zOZ$npkVO8 zS=P;6L2wg^Mre|$eijy;N(cRA@U0u_Woz~QZFFQhepq4`_o*Dm*0TdaUDji>_mdpIFM&jvlaK%Kvu|KbIq*t!dRJV(V2Ro%aL~z=gOwA=R9%Ni9&#?o|mk`hPxz& z_NC9i*VePg>$e5+G7XhklE>a~oos`#4@nc5kTZDWUP$rn|0>YpyI`LO4^VdiubIq!Pjtq=}> zSR5QM@-&SD?;jwkl{@0p zriUGY zlssNS>WHemr=6KHqyuazIeY~AYFK1ErC;T4PAtw%1cC}n@7^|bO2!wlYL{9iJY=WVM@t{ zBZPP~w*R&*SunBbOn$ffi-jtvFjhh)L08GpdBT^eQxJ9JGjt{cB^QXbST;pg{@L-! 
z75~~{=KfA5;Ylnq*lEjolH%3ZH7nzV0Tx{=xB$-{GbaSe-EEfN>ke-XJ?q-Bh=}vK zo!pz*o5{*3K_w&*>?mZJdQM+1{UA9$ntNI0AUY%GM=D37ilUlVZV&JZh&~rc>jX0h z4s@PbE!er%o=~1cf+vHWQwrB^{=P6F?on_tg#g`_jX6g#b##P+e~`l^s;dPUM01i| zU@rw5m>9E|;VFuoOP(;pw)whUO6yF6H)$Fr`ml}sl6^4l_^ zum|CJob8mX5w2dWv#1w~Q+=-7mZ<)Ipug=*L;A;sb@29EDb3nQ({ zkkWRqyXWC-%JIR=jXFk1Z8f{Q;b3Lh&x#EbDIu@<`pmfN7jcJZ)#F9a!V5k00$fvKK+ZYn)4Um!nJ_XIMEZY4i+ydPwxIU97Ygr8~4PS z6!S7e1{F6sh;=Vzk2!<}BSNd7?hh;avBWMSMONg8KArR7@xo1|eGcwz^2WncG8v=7 z}8H_N=K&il1=2+s96B&0-Xi_0p{%W9)e!qP*_@PV0hqJ-l5cXgnhD%AY7#9g+0qX zEZC{ARk}Da?Yc~Qi;aykc!Gm;{D$@ST9&Din{hld&2y<{IViIOkLS{?G*t7`yxz4a zk3ZS*=~Y0VG7NL<*h2VcF&}Yep@B92HlkUjz9}JzWdym7_3FtsoDe=2cpXw zR52@l$LE;>^l{GMpNV7UP2Ht$8}4mY+pXIM#PAPd;by>RiH6cL*$&jUW~T!|j7g5c zaYg9CVI`2)4S)j83Jc8~ZffZlhkp55MKB^>6`2?Jc>bDUnmjR&zE*_D$@k`Rv)bX3 zdvnNeSXl!h16%sK3ul~?KG-Gws3y98e(WuA0yMrkw)LFSZZ(D&7abNPm?XRSk8MBd zu|$*tBnnB-sr$RB3;8{&rMVAS` zgT%{L|Kf|Sh|~SqEoIQXf-3oWT)F-ch3-7-ZzFjPAJb7-GRIJZ)7cfa=+8S4<<62q z#8qHnDoIIm0zI$OYCS?B-A-W5Imx$^RRx7(W+;IyGf?16<8KZUE41ovQ|i|Kr<5g& ztSO%yQ&%0qD<~=E6;RQrU=V16QX49DAR+N=0;%!MyBE!BDWA>@NgkRGK-GVbFUc&@ zxng=*Qba2Un-K3=*Xy59E>Z2mCUr_2c62=U;7O?rUflS$-ITx*i{!j>8Dko<(wIb5 zB+%jjQ6^ZHXKFv(B;uS*sg3C>P0eqos~8C!>Mj+h--ubWX|jZ&9BzaY1M}y9C3lr~ zdgIMp7>PPP{@5<6D%k#L2bmDWxV-jyO{__+V#z_~ZrVwvCm$7F#JPXP$Aqc%aSL zF=P0s=KBt1Jl`|oUZ2ztr8+(-br zI9!9-LetZ?=7X8qN@=wXFP}2kO=N0wQCSTlwZV`!O(EuF>97C&aQ!FV`|e*;B*p&? 
zc66!Yw-dkq(;fIO(W58ZRg;)uQ@L`cde4h{VDY~49U+0pTY4+u#eotL8XXPIh!3OE z6m|b%yYFTO1xq{ePxJC>fe^&&e0SBdCWs2kpv1pUBO@bvt^Ty~6E!Yw``NTD{8QRw zaKNyXi$)~>FJtL!DDEdL$Ve+a&e#p6F#y2JFX0pL`JElubgig{^W|cv2kX?Wh#JY& zO3mdR)*&ToLj}P`*1LN{xBczsP(toGE9M&yMkSw1gP z=6_AZ5G)RIl}ncA47psll@6jM&`05|dI5`{NUnutByeQLQWtnj@;m@6Qwwvj}yCqX#_RVIm?M@k1;Q-83AiV^|;JGt{9HIxuvTL^7 zKdzJwblPbnBoxvX<8!rZ*jU*r-~J|xN|DlysQDzJqC&kj204DBrUE@DQV-}t%(9b!#L&-7|tM8wh9jEv5oQ+}ouAAvWTcgMlIrH!`<%l)Mqn4TM3)E+J zlcwKycFye0F%QVnyVrXZ` z`19xGvR6*$;N` z{)zrNtzBTE*-v8`2&*Sz1k%*~2fc=I-*v%1$R{Izb@J`eJbmlt2_|}gcI7II!5~KGbJ##8Ixkh z$W~~QB~jFP@iczbK8y_uE=*5E=5Hk~t}71L_ES zy=DeS?6UT}oT5@4vUOMQfE=}mSdb$&mS$iy*U?pDLV%cy=_eFW=aZ=wy>x#spQAJlAs7s{Sf?dY@{_AD>ZEjYB0 zQF+v@98oyw{&H)IB^ww>PhJnMN>+Nj2}*F_Y@3JZ%vLwjnGw$0cG}Y9zxdK4b`JCj zWrHk70w96gbi9Z${Y5+Td^m`di(rCW z#V7L{BC{6F0;-(WQuJXNQvh{iM?tNZAZ+PM(0n?6*3p(i9|Ea2@8Ao#cfWL&HcW6M zhG=PrFAnSOZ>rgaSMOUJyc9kDm5)XF`Jauu|DNoy&zkAF)3#*wdm%4v7Dc33%o&;Z|d zv@tGdb&#f-r8CyJ(pzsK&qI@2e6BEEs%8giJ69k&&~WQpuQIy*6v5oExKJFYVNyhj zz&BLfXjoJI1z<(4wjd~x=xRSeQ*fltU5%g7R}Pn^&bm4aPkA#*N9X;CFB;tS+^qD1 zW~2t;rWw|LKQYUyC!GU=)%qJ899RY~2fP^mr84B@RzQIP1bsgx?^KmPZ0nSLMQHa! 
z8s#w6uEq9N7^}7X{!;|L{MrYafwKsCIeGB+1q;Y+d4* z_lXGl$v(@vQhu$^ z)2MffBFj3IqB^hFyer8qbCCJltMvd^{i>52>w_L1y_qQ9wA_;MwSAH6+~1YJ0rDWI zuTi01$G3+{^(vdl7EP)T4}`o8kGI&zr|K)1SkX$~VFt3R;NkfeRCx=D& zI%#(wdif3!GrFd?_UgrAxqbun69(>^un|I<7iG}OnDc?Uyfq%4UQ}Mnh1}!mr(Epv zZp&`qQ7|{vmpskf3BFUhy#y{v7?Sr%kze_x!f}R3Lo#WKhYqTI+yxBDH-%+8NHOl_ zyygj;Fblye4%y$Fef+cLd43fhTbrr(9i4}F6lt#yTLc{K1_d4HoX@q2=WLI zIK1!KuZvf&9}VB=RRWxg_O|$<>NQ9%N@N?v?4Fi(#(65` zh0HMPAfsM@?f5he`b^5}qA%wLKWgazlYP95sz80;vUMs(+jc5tpb2W?iO%N;s&J4w>A0&tmo*5p9rH#w)bhi7 zSlAnaTM@kK;6iS{-5Wu1#8@ptPHCty@nqtiTM27z6b1b5$Tk2K!fQuan7& ztH!_emOs)?>j%b8uw;j@5q=3Ibjs~{WRL;SkK*m>Uk!z>jk|!LgwgZE4q6JH!Ugz% zweF;u)LqVYOm;v*L&*=>?ZXqB@DY66whRt99-waZY3)^%$B+h!dcWdZrvg>p#?A@7 ze*o3d0p{=A@~D6!2CN5wmOfV89Z9e-e(vG9HTPCbXH<+A*8JYGQqqruv97_Sks{Hl zuNs9!m&A2dqL~$by_Ggv*Tb40g~VS=L%0g_{R=1Mf5$ZYNJqBZ!sdC5ds6dIyWI&H zsfa3Ws6;+bRx2oQ5L-DUtjt3aTG@KqHsJ2dtAMy4k*VItDy#ET8wr|#FuQ{T&*Az6 z#R22(G9Jt?V}6uhpap8q)_b#q(it}H>vUHfz1D0Q`XSEgW%c65_OZim5|TU~2oDSP z)fs%>t2^T!9iR8&@WYRVw#Ao1)bt%sib%tPFQi*~b3w^=vm8+BtjeTEaK*$-LEP(? z>(=7Dv;uG;-K-uqYoM^-pV{XsbOs4bck2cEz%&M{!fjlG+Ugn(zye-Jc*6212=P*6 zjq1=hu_YGX(4m;ALFmxwf_A~g!#7?!R)mT4ZIKSnpKo3Fi=AO#-x3HE2;1Fid2w-w zho`?GGBGUu8n;~d^2Ekc`ofLiIbOO9Po4M6KRHqV-*5U41kax71TL(^PcT=kM2N1W zY2)Mdtdh83eHDe{B=Hs_)t4$3u_MB#@i*EUN}S0V6Benr!_gkD=wXXv>&1lCb`f<4 z7%Ek#6qFP`hV`tsIL-VjPYgn{Zj;ZRKX`mMP~^K}=cic(+67)$%nPh`Nk!d@g@{c? zr{Ke_d&m74v1uXmwNShKEXoaP;3L~58^2cH@g_V|!a1D)Aj=yVgDL8%^ta(hU%8G8 zA349wLW7DQVU)kJ23;|%12F(~t$K)>^Nh8_JxyLKFoUyFs(gf@ZH*`6}cfQp41D}SC5B$gJi)zCYk~e8~X5p`# ztdA9;Vq#w@vgFPOXT`U35>TyQw63fj)J-yl8G8v7?gvu-^B7z(Lo{#Z;u~J_VEH+V*X)yoTckODgKqmTm7Lz>Vx|OiZWAcw?4$58A! zsa~A1`WGcQdYAO$rQ!CqF60GV;<#b=fSM+(jjAEgH1V}{

IR4%p8)D6e?pwdV?P zym~eTvKJXbF21t}sdqxuh7+`j2IekNSQI<^cBsMM=M>4;{JW@Q#I<*Pmj5z{zS|$F zZ+*)|=HWuZ=KhZKlCG^bsv0@t0%6W^~qk&zgVz5y@cp&|EKqMmkde zsn;QFH)=yct8fccZTeMt!aSQ_-1XCUH`lYo*7otz?P$whpf!z=&@0g{@E0E*-%t@@ z#@_$w0A>QwhpGy?$fI9~FU?`v%d6Tb0EL}2<`K+`wwBhH<=kbS66ymR+p>2H+UN?2 zo_V^?C!RF;KaP2R9^XaC^EQ`Jil!) zI{(2N#D&28ok$nqhz|~m<1vDILte>=kovY5vT8?*ncD$RLlIu+E{jT$%CwoTfYyAb zoo%QY+s*n!8M|HWq}1+N*^782Y^;%JiTY^ZGo6$Jse`pqUIs(nHFF0XhPqv;9s*)R z8CJc8DIPhtq0#oOeZ#ua0X$qBJNml;b-cRYhum}0I zM6nk7cGwoFITMl2_4yO3L(xhooHsMlDYi5OXoyC_@#&d=>(m}9INtxYFWm5Du44sa z8#)*ALqJ>s?zNgS5!N}RWfTQF=LmH<>|@{b?9H&hfZVdZL^s3bj&KQ>qRi;qNf{IgUJIQ;M2%jeqo z0RM7sDCuZ;X;n;w1FGjXVntq7Yx*6@MgQ@jZ7%Vyh^)E)->OSDJXug%P-w(hR3=L6 z)+WD&n~B_Q7Z6emDvG@2@0rB`7g1nS20(Mvq z_iNt&&^W+r6u^8!1WFtJ2rexl)DHFfmSzrGTDCD=6DD~n%B%vRuSW|0BzmVmF?LoE zEYRGmsH=DC;@5ELz^b86|I)vE@K3(*3htl(Iw~$FRRmvX_Ro@fwyZ&td)6iwR{`r_ z$q>&$pUaF5t=IA_(%#HvIGKM3T;%aA@a!ENE+3thm9=4ahH*8~h-s@vy1uFTUFF+tU2D{?SiX9h*2iSH8$y*^>uK zej*fCkIlFSwgNE|JY~W7`aOcIB-_MeKPnXtu2XrurJEyFOl4ax~91^R2As-VP;OQetvMLiERuAwy z@W1|8AL{Kf9jZVOj`6@Zd`k}xYkcyAZln*^IWL*#8F(2WDG6`XfOa$2(%c-s3JF}h z4b6&?>MxEwT{@U2S6{iNMrcTb1jN4(-E<}a9ble3O5_(Ij}?zLZ%zH&x~Eer`vw(5 zcFMx>H&Ze-tkTx0!ZM>P8dr?~sE%L5ZnIw!^uOqm=c{ZO3$r2A45plF^%*K11ibZ5 zLMA5wKaarSYXzFHf#di%=6cXq5k%_Q{R7+(A}j_{*}+?wqw7exIy0{HtFpfhlcj4V zq1%!q=(doy$nSB|s*=}Zl$ruUckJ8?-BIQci59b@3a|b(HTS9kRmhiaUfQ;|NyGK> z*jcOlQZtvS=`PitxJS>X6DLv;qk!xyE8fhuJ$pQ3a;P+JoZCQGImh#t_D$4)mL_pHU#VF zv!ilZ<@A7$2~Y7k#jpCh&kkKIQ()C&b7;FW)aK3tcX{jCR-EoL=TU(GlLV1L9^O(NzVmaC-tTT2Rv=`P?;&E^ z4pnd5wsgnlM567Rh*~Qghqa8Z>tx|1c>G{M(v2VX%;A#Q-@O%;I){FD4pUM8Aw|Xu z1pQsQdO~o^@zv;MlhF>Wd=FxI=TQt@$aU2CJ5)v9?~smzj2D`olH*Mo)MYCJ- zIv))cylR|5HRcjWEsS{m`gN7c=kUY6-Rxr?(flty?G|i#>E58iwi}OAvD&T<={SgV z{4%LkaP2tVS18kQGy7ua!k_(HR(Q`yx?q4W=~kwNN&wM69UW@`L zmF-l?mYnuE^F?N9ISKyE&;H?Lhk|4R8iRO5bb3KZ0E^;>CGF9Z*ldS$O0}zSz)ZGs zUZpL!!Un}!S}J+Z*14^pv;J063L}W$1{7E_^i0C0CDEwfE2H8Mnn;kmk%rsI_TnlF zlYl>q7ArxIixJW)_{7FELO1CE=Ti>QxRS&*-dxBu&@~6k>7hjH2YKfnql@ZE@uqL% 
z4zBE_dXlN)iYX3dHTZ#|6f6dG)}80^G0yYn|BG*`vvDUSDfzd5tpWe#ZvGX29`7Fg z7azYgOM&*2&7a?W{^z^7*{Ta8-5*)3XJL@GJZ$!^#Lc<)nd#OgiYdN z(JvZ>4;a|Pr(=r_b=LkMr`o|LN95^O34slU1c{Hi@wuLutkV>*Xvtb=$pNQ~sA{&C z!0L4kjMt{`G*n2Gx$r}*y`j$TS03w{q~fNTh9pOEye5RRfYb=e11Rk zzLkF#tF61ME0Dy}wekG&nc|+|6Pz}{lg2c}iEjge4h|urL+UsMn8u~pj@EOj;D9N$ z0nY}?37XaqLr6}QZSs|J<8}>jw~?yt?!B|LK@AX~VlVgq3hkz0W$ zm4=|IRhI_KuT>i`1|DxK3PMAr)!60BG37Dav6n=oet#?GQOl$sSUr0llGmb`p7xBJ zdWBG6@Y;F;lWDc7kS zjtP8>kHVJ9`dVtNI7Snpj!weEk88q)bexLYu)FU_ZTo@EvEveaLb1Ed_8Oxb<4tltay8bV)!bWRrM*7E#{>i zJ(ZI&Y1BM=)%u9XOfIbX%wK#n|Ne1KoefLO)XuEqLxFLgPYkLpV?oQuM<%uO7ui2c zKaVJ+0cTQgv=;Nkl6e8qf^uS4E~JNKA6-=}}JHii3B2@Zl7MobT6j!<4n~lvfH^_$pO3~aZ zU7axFl+%ir%aqR_s0;6Z9lx4ky}Vhoh24`UhI_@~BNHvtdQkRMCGQY>s-pFds_jrU zZrO7`Zcvo`dXX6^_%Z!yyc@J!aR+;-Mr}aFfWh`u8T)qOVSu#Sx<=3{5664+gS4lR z+(y8x0)#tKQs5-0Cdz}mmGUgR%;mg76OZ}P=+4Ik%^VSyv{BA5wFtxH{Su8fWO!Ua z;)b-KaVV+a&P8E#jZ~neovO&LkeXFly}-tYS?KpPAvt1xn6Y~(H2-?Td*>jwEU9=~ zs>QbmAL$PL1%s%%daB&2iZC2LF&wB4JoSwGR24lag~aU#r@GZ}ls$^~0>{P@tss5{ z1j(eaO$SO@7ifRk;F6StX$pNtBwBPQIxLRC`VOKY#QFfR)6~>WLJ%o@0ol1bYad|DTpsCcC z`H|n_3TxNjrE^;2!i$Gw%uai>lywRHR7NW=i5L59aVwxFIfio8#SywhXMhWbDoR*% zOWdE|vM4+E1g=_H5 zY((CITTz+)q(s#pHgP|!51pR=wm1o-CapD>z@iW`=J50nM%?-+Q!N2W$L2>fusCqE?Gy#|Ikg!`rs>w_0Y|5Pk$OWwj8Kdua8?7 z$)$gOydUK}=db>%E18HSg&ySBs*is%Z91-B5WT-R)ggb;t76!`E)o?H7#L~uD|dc= z!NEEgGck$xVtcLKCH__@*ASo;8*b2N**Qa16jjwT5YYkz8ldt~;2`K|NHAz0aQDh9 zEhp1d_T-*8^Oj$24-}>W3xql%vVRl)<)7Ev|94yeBUcBL+SfdKf{QYK8Ij$0Pakwj zldDgHd+h+C=*uqel(7l6F_eV1I(%k0k91%b8K*XNdX#jxK>OizEy?% F6VKHh`H z4O&Wh6eZ9;Z|GNcU0O+l)mp0|iXN6f(o76HJ^F6@!?g9>UR(-t<$_vgkG}a6w(bMD z^dQYCRC?M3x?Cy$s&T@GY6zGqO<0p}{$-{jcDX5fXiTxy`RcOXX)06fXyvCrVp$I>*Mf(tg^dBf2_V*&?$G zNir)j@IDBfh@CJQq3C(-2PKQDEQG*&<=2u4|jZq;+c`}u>z<$%uZ@rcL`uR*^t8HA{F zuv>I{wM2Ed^asINUwApp6~Q#S5K5cWDr162z1XSufRqA69ADc@-k8RXqnGGCKc!lz zUZ6vh=VUD>9|7*CRbB$%N;+aBq{f%=iA93zB~m3}_trT0kcXoT)Mo}^{$t&k7R?FT z!l<$?ECHIiQSjcpYms)2lK4-7ZzYD~bbZD$6Km~s#+v+XL%jq2J?>%|q&s$Ns)bk% 
ziMs$NfB29!l>=7f(F88al}-oS21>oPQj!Tw@5HK2fzg8 zd{3nDwby@R@4e%i%+`HzX2voWR8&A|j#LdrDFJCZDgh}00|`B#R0$A-5IQwTZ~ zthJu?l<)WL%7Y(+96q*s`Rx?x=sDsqsD%0hPoK8r7->;@sJt?ZDpJZl>sTkBHX31m zQL>v|JFzz+$D=#OI<7#SDKee*>4vFf2k3{3%u^2{31i>*26&HW0QU>Qg@Pfrr_|S>&B|*tSD^eHPdXBg##a z1={0!jYIuSVX^O>7$!A~5YMVs{3rYZm>6m|s5wXsTo|z=mLuobFwGY!s{Fn`nguer zpjy{iyl~jWjzz)-Q58upN&;jD%+%yWR<-MK1M~@!L9)$;H3F1Z7C?F|T%mdGrhGvS z>thMwQI;XLPuf>y$l`OKqy`#3FzHQC3WR;RK!&G$wYC_)>Qd&cZb!lysz-d|Jj_c)khm<5G6{GrRqT9nf{9+XN02*QPmzpmH>l#m zL4$51H)uME8)vc6m z39m9@9ctve1|`?%wg7afxw26vyu-Cb^#Z*|y?S(bE=(JKq|=#h4R*)+L2%KLVzIkZ9I3kM;Vu@*ie z+sx2kF)ks`RL!rSmjU+g)V84i%Yj!z~gS#5)Z zk6j5K?Na;*KiRyB&8n}^D%hF!jY*t91YXrE&8!8fdLPAS6_xrp z00{XB9(gTy6AXEaewSZ)Jgk++N?}DHL`ALZHRU+x4!1?x&fFtjD3gK!E>1eTL|Ye! zOqS?tY8U`pgj%CxXaHVu2taEW{Bhp%^QclbKD)Xcv%vxKf6!1tAeze!N@*cdsidSA zfPB-Uz(?_|kDn`D@O}wh=RBZ}zcufv|8zGhX+ggj1I98o;{{Gs-mfFGs&z*mi!j5m zYY@wwr_4JwOf%Vqti!ouY0H{n0ajA1wIj-^hfWD7a@oVKdA&yt6#AOIyiI-kS>1-z zx85>CWiD%^EzQxrb`xa3@vTMci@-0k!Yynwe=eCe^^SiZO3W(Q&_+KZ=N*p9l%g}e z;u}e@$q5iU1OPadQWlag^pPZ1eoywtb&Bd-a_uFWU;cBYOjqZyFzu#B%;uZ(N`m!` zwH}??;KDxuQp10oSI6KK-_BSWO|MaY8dVM4M>kog}Dn!|uKno<@zIMi$Rl38ZB%?BvE4 zLx0|Ifn6d*# zT3WJEf3!nwB%9uOlX^O6O=FFk+1G$W~h9jyPT+IMR2>C zPe<%Y_3+kUO_#BM5?v_nqVs;<3unw&pxwp)djJ}aWT;CoLIAs;a7GLT~;`5c%OXv zQpMbZqSJ9wxUjxdgxEIgrnoJzrQ(t#J%~%!tk9dOVHhembR7}lHmH}_poupYEa&o% zHY+kbuKUF<9aSikI@M@vVfB{^=Yp(-XodQdPM{6)`vA0i^vT}u}`a9LchxWlvB zZXk2a42p7brj_3s%#>)7#g81T6IC}yLxSTq zTU=nt!)Rw|%d58cMBAtO$7V7X#t{TBd@PJ-II4M2g6^=i2F2D&#hFGSn!NS0r$!Vb zjfmWi*0e25!oa56Bda=QvPvO57ojxw{tYJt%VQmqP31Nj1^M+1NVY^c3HtTli7kn+ z(#rivwcg0Sn;_fBM8QkECIKO>^DLJ#Uw8Lj2ieNo6yKuAyNn=S$0%>2Ty~EB1{&!f zb&}c|nbB!bi1cJ?QpND|JgSDqocpYILju9x`XzVlokzAG8pn6;Jnd-;wsmW7eMZMR zENIUBlpq7M0^lgq?F2mC|FWqPkaOehJTH>uTccddU)RQf@wse7npcMD>X(xp!7Mbb9}Iq2Q-w|}KX zWdB0CX2-q4uBHnsM+St_1R$#d_OjmE@nH~W13w=1*6aUOZt~9~eXtvW5X!vJhcg|P zD3P|GZH|Mo;S~`(lDq{?z?U)}433Q;uv}Y^<=a|g{e-OKl3Tb;bq>oDCcxD~gA{-M zPioBH`QyJwVQV*qy6r<9DifM6Kc_|kV}-z|N(*Gcxbn|~E;0Tyk(2U)d*rEmKRAC` 
zB59D%0Ut^*J!LeKu6Ao}fhB8`00mi0mtw$<27=r+2j%(xkd?hOn zrNX677b-gxPRDONfQd^NIo(>xz?nV!iw(jK?uaH}mTJbe z0919a$ONTDzv?bOAgnnu5@T8!i%8<<`)^2^{5$Kyf2jMv1{M6*zx>_HX}5!R1d;gX z(_EUvOjz_Z7-<+VMDt3Jvfr()BEbR%xlz~qMV*Fszzew|C$G-{EQ|tF|X{7{eb*$yk< zUI_r_t(~Xyp@;7m{-1c#`dhIC#lIg#=YQik{=mZ#jZ9nEoyxP!PO28tk|we&zM1bKa}% z9{QY%LY=n|eb3Rk*zuA;zc))h2`J+l7R_*#K55if<1U{h64^+g20~3Uq0_L@5^a62 zGn`VZ_ zn5I3Q?n?@3>_TK)#^VW5b1<`>r&hu?6AAYWjs{%)ah0g@Sea&E&&e_i$U(vPq10jV zHgs5rK_Q--Z2%}PGQU|pp47RsT>WLb#F$zhE61zPxAy%gn!KW-_oP+sy9)i2KGT7gxJL6 z%_qk(TBacx0n=3sb6gRrIeq2EI5+$rQ^SF55F~;UY*IQKVdh>>l0RwXz{7=Ow5cW) z=ak<1#BRsOl%RMtF)45QIt1n#-><573Hf9?%M-Wi0j!>W^PmF7(p z-pQUywn#fGm~4;wi8|Wb*WG96S%{5VQ}f#>;~|I>1(&Y5OgJqsu)NZ7xJ=`bYtP6% z5|TS@g5gbv8!u06zik)-^c86$sdME{G|en)LV&F28)Sa61O z<#^Uu^7am?FCiki*kNKzr>DJ7#nf%PrFBwrAv45ZOWT57+%PVB(x~&&@UJ{%;+*P% z{^*+ZnS5nK*Vk~I$g8}7C%q)=atlOq7NeqtK+>4c$H!)d$U9;60#zY8Qb}Tik&%U_ zc`8w>#SWZre0!#3z&F0-t~Xon{l4)Hdc-PRF<^-c_AuHh2P3ieeXPkd7sEK>eI;+q z4YH{&V@bgwUe#eC=|YR|Wgjh}qAX{c_63_H+dVNG9Dn90Fc+PLHg%2G6i#z;s6K_g z#q!rfotRu(Z}D&RNfASC8I`d_VfGB*3;F)NnV_t~P)#g>$?kx11t9@bIZdxz@P8^1 zqC2Hhb~IYNQzI^UV{7Jv2(huTSlNYXnNVJiZ6bN5=Eq1v^@-`eH}@xBVEY2ckM0nts17%XJg{rmDZH|m%b_UMDQTQkI`odkf^EfIW-LXYm5MjkP z?;d(S@xb$lP~~9d*OocQmS@MsRJr~sW(C-(pK{BtCWvf}z`KLTWA$zYE`O@F>Ua&qGXfGL+SdG7CYd` z&=_<%*tj2~=diX^SUAN# zSs&|hl%oyJpi0^3xf7r;&NBNG9s`_(%e1*355w8QgAIaG<2bqa_29z4`!PX0=q_R?F@Yh1u7S7ljx=@#&H@jZJN2 zM!O{)nLhuqfHkNgsW@{uN63?ss z_bM0v>nXyRn9AY@@sO` zBc$h;_{SPZ?g*65*GAu3^x|(J2bZCfQv2)zsP(K@@Jwbxh2AqHfc9!a|$aLh!`_13aNojRkcGu{Yvdj z@O&U|b`@S=>zcv-Oe}z9O@@^)drNGi1nsMOP3jGF+h}WWW}vzAn#^N9{@1+G`Qvc) zTPiBJeq_A&Obw^RPTVTdo&>zlf^;;qHZ1B_!;s^C^6*kDk9HY1&pe%QZ8^=C{*5oT zYd5_t{AJANM1iZuM#ujGApEWtwbAip$b7V{0D%@a*#>|%S&h}Wzs~zAufaI|}f8jmPf2An#w_fKT?b`3CwEOcbxSjb|834UO9&Z%{x|=)t^a(m=bztY|LoDaW$~X|=dYN`ArokeAw-^YAWIEq)x5^m;JiQ8 zVD(-=(CXcbjJx-ZAs@gg0+oFT6HQ%xlhTrVcPYX07p0>Lt$@;tIg1v70H4+_)bO04 z?+oC=xo>n_fsD$d_qZ4&W&I1f6)B%bKFaX+5 z5SCM|PJNZC&@2A>5T-V%o!{l(rZDYY^Zp=OT4j##qO*hL3I(ybT9jrI0w>uyMZ1V! 
z`U_9M{}EKa`}=VR|8d6ocP|PDuLqg;9PNFU$w&2t`Si-~S>e-MRY{eCC7gTsBJ}B# z6@8UP&DION#55v|gJkG|25&4i4vOLfJI_|4Xs(~~4Ajc$e#XfR#z#}u?W>k`gS6s- zf&l;(POD8JkC``K=^x2H+T>lCtX#-S565*G3kvcp`2Jk@=*NDH^Qj;vuf*Gw;Ej~N zvQs^){`jtlqf70$FZ@k;uo4OwuB&>n=#*b$RlB8bCv?&?h3LFwnpGE@H;=p+H1D1@ z!40S}Dh&%Ge&gCBWjgMMFPDae|Rx$4Dt)6@n>8w$<1>v@%ESyzSG1#EL6(U}W`gk}tkO()Qc ztTZjMA^1m1xZ}i|&VPY*iEjE6?=UZ=(6Z>WRdV+I`d_|({g0zezwb$Yhi~f79sObR z{aF^j)%wHc`z@*b)3rZrzP~P;Ki&Dm=KGuS`48HBmp1$MCy#gdCm&Ix2q+Jb)9gQu zf;Olb_~_b`$Xq3PHsJ5&yy{n!h$EtNDm3w;{<#ec%^*5wlu-@2_m$P;u#Vz70 z0Ld;I{7gbs?NIUa#MpaI_^2;tqAs!eM29BXsWHdXP)VVjQUA?6Prs6GU<+83 zBmE<>ractWoc`5rez#Fe7~V#3O^z*BK3nf7AuIOq_}A+xur@_;FDKQ=8Xc3Mjrr%l z9M28E^*4p}4|?3YH(9Tn!KZI$Bf_lNz1+jqh30 zS4elD_>KE44yaSv;+z@orhu_6@eYZRURp2^>t%de$xqwgN?koWafBUcwxFt$AIYAX z%E@j}JPB!&^d5&Pd1lnF^Gc?`dqCLkf#lF3DXw%JYH-a;vy;as83u!nYny-vgO&}F0qQpf1iL@8|4vLH8>5S7?A`6HV* z+Q!@scz-c{1+bC6j3YM`J$){Bh7o)IzP+ewd2nM(Fijm>o>G$_u?}=jNy*Xx&d)?K z$spnVz}vDLXT`p2$)i<2xA0KtYdvald*MEN{xpSIT9i(N6|}dw#*-bU!+T*48%2dY z;KlG)b;52k<=x5ShwA5ngk8|jeYg8BdI#F3PS4DUix^86NNJ~8(UCl+mqhe~DY|6Z zVl-twJzwqDpKuN$-Xh9)OV5cESDyas%H56M#ULsGSqY zwndAU_gATOkiz!3XIT$-zM>%Eg2FnFh@9IWJ*kn&n16%cEHI1#NA+t zT!*y--GtPN<@3m7LWye=xx{|9;QUj~Ors)8&8p@|Iz4Sc4@`vxmNh^oky10MQX8eSrZE%djFLCA_C*ocHx~iYE)o`IlE$rMOspp>C7c?ZK~JnX#zs9C zH|{$o8l=_}Mi>4-Ir!`dCYGJWq_OU`dIh6fDKC?h{M4nhGzS=%*>;w9o4iF4O`nkJ zeynPr$CIj?h`lHTv)n#5QKSOG_=G%UqpR>dHrJsr&2AsYK=+gjG(fnKzdT`U_ z2H<7zd4}i&cDy6sxcX6n2f{LQ!QTuX-w`U<%(SGEJ>y{l0nn)@Z*jw-Le1Jb!tm+) zITe$Z>^!PgwLHO|(pNbn7>jz1>XY>rA;#M>xKf}YLbBUWk!7;`o_{vJuFMVTt1q7{ zpM(v6+NI&4=SMj7eDtJlNzV_ zD^3=Ke>fzn*MPbR$R;jovmvd0wXRTbMonq?xb*l5eN`o&#}9qwK7_B_q$siTEJdr* zVz2wZ32(K7XZ1oSyGzqr`;=jV2+?NNr67jc{RJ2BbLZQeE=>59M&H7cQG{{s0mHab zX&B|Yg%;hO&Z^T{vWzdR$l+xUJ?8i57Uxi>aD4_ZQO zdKK^jFMxALHKZ!WD@jFCl#bIc*H#OYs)wkao?<)f`)fJdT7WdkGFN+ln-)7zHt!cmnOllGPlONNDq1{%ZOLduL!Q%s4ZPS0kmgUEL5@)*GK)@L9(q6^M9M2r0a@-?T~jC znprCyqSJ!{-}q$BUOS{&@5-&#wuZG$o(Zd^E^aL9yh^Vc=M-ruHM}}Ff0CKuEZAEm 
zvVq8}vbik(9F`yjc(ota_amfbzfKQ9SAgeugFvE*+=_=k#O7lWqcJ=p+Q_2@;I+mU z#K&rI=ErLQB9TxA)gN;y#J@=sZxgMKvxZGCQN)~V!TxWdOWF@VenlK^JsPN29Ia(^ z#u6O@WwMmdsVx&L{9|u1F$RFe_7cKkyZ%*~%9wXRr)Lq*C)mlO>)WeslJr5+3&%TI zo}x1Ns6}sMEXn^K!%C7m@8lIjJD)aPdqM;pu@qcio^6OvO#+pZJG`(R@;Xx7vnuk)1xyq1c_Z`oGS?(&p*1L>{U`11MTLZZ%f?OFQn^Xc(vw(_(@H% zIFJKqO}7BtASW2KUi2j@XG>l8ekmJu;IY_=z7$l=3Kmz9G;AC0D2q;?ui^f<;vKP; zc%R)UbpA1{Yx=ZEoPQKg*StQ>h1JQ)lSVFnD zR)vcI(3?5Hv}d9|bXrNDL~eL(!swlYG2ORX?3!YYgGO%hvtnwpY}j7!C9@K`Fm}@3 zQj&QIha_)tEx6Q9=%PU2*!Wt2ryVUhU%K)cY)w?BM>{LuIYcn+Q@UR>5zl!!>T3^) z(%+^T0O}iCXw*_k932S)V~H&96x%O}kqD^Zc3Qn`iFr9Q-F?k8`p8YI01j#On7=ZG zz9ZPV^abh(FOh#4k@*HY>r&+@ku-x$a92m{JWXiY?;d)O2C{w$1E*IlxcHk+6?`Va zlPtn}3-0$S_1f2Lkxn{L*`VI$)Q(i&15HUk&xyn zs?=GsFzwRIUPk8O9*6U5gRBhu*M`fA=gD4H#wc5c-AkpXfP0^I*SVx5PTp*Vp+IGlTG8i#Dhd0a^pw?IXc zh*B$$d!+T;M#&u8V0Sj2ky%ndSpt4lni3u_<;b5BHY0A;FqdIzeMPLePAnCAMtXB> zm6OeC6M9vv_nv9mvHT`<#ih3#C0lT4)()l#UD>dYax@{! zJN>ZHn2i|ASQS#pwFn5Ex^CB5I$ojZLtt6UyG86MZu!J(N<%F*WhPvVNF1gqclOctDGI)~s__iPR|;cit2V zg=fdU@C7v{=Yx>L8>T8$`57dC4z9vj{Pbmpod@q&H?8CCBYi6}Vx`qQ{S7;> z42TC>=0LWUZP6Ki5r`{WNY|7t?VKOxsbhlaWPc@jCRs1In6A8U&B(nbB>wt}e#N~( zvnQX^93XL44FN`nZ%O9)1jhH<*L%ek_)&xo5LSvLm1Z*VBp&sxllzGfWn_;whK$ff zeE|J4JPgvK5RE4?gY6MF8kIkon)C7h>ta2=Pg-}$ntd;+ ztLq|o1gTfkQKqJtuo=u+;gsNtheVXxdi~*;r!ST=vj*mSAl7s0ZF)VW@rZc1&U43U zW${wTLmM%JcVT-g!w#6*0bZ)x!2D91#pKG)X#Jk@rBIKhYu2B5+>@=|8)u!LwT*9> zrGAwa6kJKxBtjb*cUtlG?0&b)G$3w-O&ph{yjtsJK)ifD@Ind0f z_K$1m{EpPLe>T_T=PX4BAwfA!OLUNH-=w4DO=AOtAM&v7MUSl;am19>^Tm`?Pmz-T z>F1DEcw5j5o5C4<94TR6!iA(TZI(WHp#sL%T%YZ&PX^vrX}@%9&CdMdjHlY7iC=r( zVT~NL3fqeUY=1hFp6&3qxS0u^ElJW0h@91b)nB!nVnYE04k`kW$s{ByG<;6n{3kg4%d8wEc<%RWlrniO<)lo?>T3R+8FNX#!c6PSid-Y(ejdQ~JNvDOgYo_{- z^6nmN6I(dE!{5n?9&W)htM4ZarsxkwKo7tBf%_Umd8KDL28i`Iwh z6xEdP7|MtV9|>o^B$~EXN}HiuI#&_d@f~c5#eMMYxrB$0!p^C^tAsPN&MVuSZ-lq0 zTsemsRa@^raRM>BX`gZN`Hi%Q#l3=YSpOcK`XdxWQri zN3bDG@g!BsWv1l}6KbU{>{H&-xmP1aPpCwF?cYBBVZGE>+z*>*;_F&~%Z@L$^5T@yh z`NCv{0!D%J^ws`7~%B@T9S4vf)K$NktkyHsK1lX7Fm!qKe{o!zi^8 
z5%$X=ud{CFpZDM2ypuKQ+p=)7+q^hoWv)P8GQOaVOW+kc>KSM>q1@JAedE(6e&d_t zJp6LI^$L(-DxvUIb*3}jQz@qlW0t_w+j^ayk0K&2oD~(YGa)-QPK%9)LWs0{EBSyK z<1CJCOkN^mPq;QwOpAE=8{e%=fi|7wTiGiPmr{bmKR5(t2d5qPA9O)}?SyXw1;NtB z89db6@I&otvK!>D7iWYZ!8vt0^nRR$LlgLRKcn+au?qsE|Ija#4QtLcKNiw8og6K8i9pQRhmtqdyX0|u1} zv&_`HKVUqQt!_>A?^gTknVP!7*TL~Hl~~cS&egi;0fU=Ectb~*oc4koQeFw%1>0?R zn}O^F>RPsy0P(Swn-txVp~hs`OQ(#xh+I>}1h~R7(_SYlSaB`Ef@Ym*CZeSwFDTf- zpbnC*Og z0Af=@E%7n`wT`j_L-hx8md={xuVZ&I*2`kUJl5J>3PZ6&A5`AN%=!^_p6Gqy8Vc4W zV?Fwk?pYSPh^f!y$`(zsbkG-P6$>$n^SFG=YFJ0UA9Y!?s&lrZG%1oX4uj9p?qWRj zt=QRj%RQBHp?5ZyU70G1okyU|MVkZ)Z+^?CC*Td)3 zkoisi@x&AsOZp{HeOtDoFAQp~#{x)+ERdl~sCal=D=Mf@)vNvXKjQxUyZ2@Oq}L%^ z*e~Ph*RH+0=anSv4r$Y4!7uc^DOLejpwv;YxT#FY0~uZ^Pl$XSDLsk{?7^5bYgAO| z)tn!yolkb`$4#PF>Pu;s#DrU;m>!iZpy&EH2q8O7x5aRO@;MYDpD(h7UlR#_g-}aP z0>f*ptOuTBq)0_Pfawp@5_Hw32T%mD90~$)f~()}J@q~QfwZI6QJLO6U1)1}(}f2t zoLf}WWCKP$)QkF39~aQl8ml*7-JRcSsQ~PJtm>j}{Vhr4$m9pOXBI zwqVv|XNZtY2+=Af%{G1IN>Kd4znMl<>Gbn=f2z>_jUQr=o!nC9i% zPxNqvjU_VXr7;L)&GXJfEZ5ocdcS?CmAES=I@!kB*Gfdkhol0}ZX`qPXJ+Irg+8++ zpkZ;SEr{hgeikwh6zvu!jW~BO$n@8l{1bvf;a$SbNMU!9 zwHFIB+s_XC>7$$v(_3Q|rTBUFskmDMP;MVfl>Ekmp_Ru9#B6rJPR;u-H_R|gt1;S4 z102wr%Wa(m@DP;eNSd|xpS`KN4)YaAT>8}p%NPR+9>eA4RlurOecNMUXwPh$b5hn! zePDFs9R1QIGb`-7L`)G{V}Ggj>-U?5uk3sg70}x~6X?Jo880NF3#15*p2v8Q^{1j)=^~gOlxyMh z{jo>u4gHQeEEe+eh0D6?J-ZUa=U@xa^>(L^1)voel>$#U4((8M=~!;rM3ssQyMWLO&0Fv1}o)?#R6b zs2c3_EYJ%=spj}6=q5=Kfa&T3bejnc2tfT8cZ&BFKd+5vu|ii+9jg`wYZhC#+kxT*V1Jqg zL@O&ImdEGJrW3K`F)P{TjUv?w2dLhS<9yc9vfd?|9_`8<*H~4;z&7&L>u#kEcZj5d z1(y_B6p=X)Pw5JrLI4sDc)JZ>yF9W8EfKdW&Aa)_m*|W9} z-oB2@PHjMCkl@&=LKr+oB6Y^+$kyZmp^klh-oCF6S2e^l1`%Pe#7LxoHu-^iP=pLd zU|?r8(2W1ZW4=t{K!)EItz33=XFP! zzHfX1B&uawMX-g9_C(S*zT8C^YFAbE&@qKSnL$2q)+UIvE<3#xLf7Psem*Xb!!ejEi zSj^8QRe$YhyKIz`(y6? 
zcdtL!L|=Gp%N0er0O&oDz)3RFvbO zQ)GlW&_0rmj2s&pGCL*6t0IQ|=WXvp^SQ~@S3h%}aL%3F5_)BW;x}SwRos03KIB<# zPI<4Srn!UJ!+63t6zaD^!B6DP{G1Rv3&&O1XHFHxKo!){;9V8Z%N>Ldk>$pB6sT=)u z!v)>yU*%x%Y>?H(D>?0`ERC`aJD@`o9OG1;-flSy-uLCIh$sA1cQ;OwDnQTI(p}iB zp1{eSs}WxG3P+68e*byr+QVc0r(Rd`cb#iU7nB9rJgA`I$n&=g5v`dsn(7X)ajR-+ zFf#xl1rh2h+*Zx&gZuG-lu4&cK(2M@o99cQH0BXa$=4*KO0fy$f^G~HwgwRpu%1tJ zWdo0&+rFOeRAudY%x-@;T*>pkcV`_?S3B4Ev77#OydQ3*`W!vj0{vy0sgYn?XNtAL zd~vU<-qzQUr?~XRFni*~V$pKCP><&MD&1g-sKpJ0nc$6$`joR_cX?b9f;K{_9$Bgq zJhu|7yZ5%;xmeN-Sh>+Xhs~V{NzK5}!SogX^-TeK*{Iq5qemPsxP2&GwbW{V)L@3L zv6<>7MaNPX{CHeyQ_K+AnaId!Jo)Qr(2{Gzq^MS4gSypY58%Y?RaI{iE|%l{Ofa`F z78n5c@Ey7fQI&^VX-*3E{jJtu zAU*`PlH&yZkM^;^UKh<^GC4NLpuDW$jt4>nh#l@qme5zokniGP*Ns?|-R;aL0u0rXy3^)0$=e!)hOp0|(d=TJ7wQ=0;;5i3ANvNjeLNbh<)) z^uS3od5)>R^Lezug0&FU$ouSkK@m;YB6Mc$2-sNpVRoZ3$dLI3#_~W}z=hJ4^CT)UsVdFk99&7x-FCo;r z1-wmAV)fL3b!H=O*RQ6XfDNTMM!-A!pIvrRh z+GNTOHkSr1YIp6M@FVzCE!PO7$38#E*K2w~)x;Pqm7ibI+jY7lJO3-}bdMgZ?FZcn zN+&%1Gs6MO@y7gUWZDD1_Fio=TAot$9WMWMekErbnQ|BqDA{BT*U)Nh#D!*di|p2t zL8Xc3ctyr)e@p-NqobPwTZ*7p<{u3K(XHbUaTyskMed5xhLml+SvU-G_~e}cE8F6o z6hTJm;GH~d3Fl6wC3`f}qOZs&pl^9`i^~~@?jAP8#+=74fwNzHVSeB~zFF80wt86G z*CBK>b)vJ_7^ql+o_hJw($7zSA+kX)uo5%@9s?_qJ@Yw)%T<6`N=zE^wc-va1_B#c#>_pH~CCZ;^+}bP+LK-u5rSsZ!rtj*W-GI z3$G?;Qy|&W9G5ZEd6I*EcFGcOYfH0_w)fr)&@9jaE&=NTqBqdMPkG*o6TQehzgyeU z7bas(7i3qt0w3E>1q=;TG(D&aR2EcJ=$9+nJTAfJZ+nIb?EWfJ=NN*+KadG+!V4wm zMdYc=_gr9z;PtoDo zewS1LxK&QShf@2ze2Y(*i5=xI;ZfdWiQdXt`9FZt#wLiFQR`DS_q-oS%7H~Fw(GXV6Ka0AK>q33!c#;n zekm#s`v&8S{>3h*!7Jdb=Tr$IMlOBqb)lzyL5n1gx8+AJLIfb9v&dLIO$7S*6R`b= zeDrokuhAx{#5iHP0~8TFs}v}DT75_Zs*yq;OFo11QQ7V^jhb#Lcj}>IKV8|ID|JPWE(*ZVI2uXUY=)c%*dzko$nDX zv5s@=1&Z9S9j0Z?C7PWN$&pPOX?FBpoF+Xr#l55W)3GPB!Y5NDv<70#i|>VT@4X4R zYT4UcqprIWr~vg(Umz6JLZ7*C(CR|Th`rFJYy2V&h!K6Je)4nE{)!lNu$jrsg*RkO zg07SU?AF*A4?{e(i``7eWmrt~M+n5$e)m};!bA!-*4uUf&WpI7WO0UGSOk8wy{8Eo zEVRo`bZZgOPO_cUG9|O|y^~3X`Bnb+sp+2kw8CSDw<)>aMmxm0(}QMa{uM-6Yf&89 zZ~J*-l0Df^OGYmcKsa~o@?0Rl`{#}8l%_*@3B#WCg(cQS-Mw-lWvBc;F#V%5ZTo5t 
z0$~;_*lLYyS3t#ZHLJ(qAePt%VY>^cBdQ8lR^-nV8f{T!JRRz}f^$}#JOTxsSSVy> z_mw`gg|cE-Cj4voceJUPiXs|>mPN_(^bYt!iS@e#pup(boTXH=Q%Qm1$j_$iqV2_B zIK`RZoLYnHo`uoB-gBozXr{11jgZzsQ+L_jYX{r8@r_m)o&i<&^1rvgX74YtVS}t3 zD9GgZ>$SZ&JFscn1VFNJ@gf-MjZUeu?c zQh9-w%v9e;A$uluxf2+OWgHI4Z8>)hKe5R-@$c-Ue^cpxE2yzo8sH}fUI*Tji>Famp6dpW=x1Ts1vd7`((Y=RN4-A0 z#e^52;AGkk*(}V^cR1`^mYU7bRC5pR7IEB)w^adnLrEm~I6D@u&a z?$leC&X`Jw--=_IB*7H@xX`e$k^z!Oq;m_;&o$`7opBmK*o_s3dkq zRk|$4`(xcN_(whAp(a5CyrerXhnjWGdpj3jm(Jhlpcr2ub^wpakZJ~JlY~NAD^@fi zIwFzjQnp!*6oFHpBDD7D89aLT@6|C?-wjSS%ujw+@2AVFR<-^2V zr0km>Jd=Fm%IA_+lkJAZWY`1cF4q{^B@E2iSy?!WL3*oufg+etyCb%J04ci)fLz4b z)6(^H-*DG@(J0U>2(;!(Xe5#ufS0M%>nPV-6x6iGzsgvr4l-d5IhOnI@o6LdO$DiL z`8NwI_iQXA-1nO>F?2?gxIIy=FjP7;UDct9P;QW4$mBdo_llTKFRQ>1A|3Z(`&Yd% zmV{CXigI(zqGlN$Un`c3A=E~AaJ*E!-@J|0Y8<+^+|lZDod$2k-o3BWJXI=#w(l~ z4#fTXl4w7KFk0odGS?}DzM@CCH&5@?3SzhFm?fj`-CtMQPX~H%ENsqUrwDYm$RIRe z?L;$ay4MAI)BIP+=kjfasrHq(OnckX!=1p0a&p^!_dqK!7>RqcIO-^*yZW(lLpe;4 zKnM6kEpGfGDXGK9_xfL^&u=w;xO1&}S8z^z@5`bZ+c-ku`B9|x5B#XdmjoDW6I9DKm z`{U*zNPznKBVRb_t2zVrP=@WEc$;@hDx~!N+>g~9jH=LTT=bhSH+7S@$45pJqKdbQ zGGMblCzGH)OoajpRDH7WrDdy8fCW!KEnHE~KC}Q155BKcj~8GCePa9!g-8AH$akqB zD{Vxm>iCb$hw7gS-^u{_FQt+a10lEt%w%$AS72OB2Ej?Bt$wd+=?6hUb@B|*dmB5j zmVixZ`J9{Y5}l6PfX+D!?f~*ta0E$$4-;&=|N5POM`XVx#{cnaj`yES$upi4hMs(0 zw;{9Ih+J{;(*QEddBpGUo@k}A?91)P*(W%XHOWpfe@QqJKQ!es+u4?-aD!oT> zrH8wg`sp$YzuJwTMuYWu;Ht5Rg=ikCAMc0o6#FQmWEyVfU14DefmzA_^ymzyeM{Oe z+qN&C0)q*h8r4ebcg_(~^!h-?F8yKwWGkiGyEQGhT6&d`M8#rV;>^xLN>uc=G~tUP zQl&BaOPfl8dkSb6DgQ&+heH4lN7Znn^v5!{{gROeZWZU3kPWE6?3x|3Py1b-&Fx_WW8n z7o(&{b26a**xec$x72!a-SKWE8@dRUBN*ibM*ojPou0V9Kv1SLPW5T|JST>~q%`Hev#;U(ssLmK1zIk)V5M6=_d@u8~JdcXoNrlVF4^A%*Y6b8BFc2yYCaVDDw?mD|e1a^7P+ z8_$W>z}Z4)=f+I+=+E_IZ&ju97b-2ruE{I}!4)YRyBj8J;ilD@C!S#R3dn^sRAzf#E&vYV%t3)wymSm2c@XC~t^-f5V=HKl-a(2~af7h~hhjVYJClm&2knLhu8#5hzKkvBu%(4!nA-h7v9$+;Spzg_QcCFuyCWQk zvU5isngVAZj^3eXV7VPe9kbCUwyT&vF3K0yzqhiP0IrYvaiFU~bmZ=}I2&x4kMWi{ zlO~%df3Ro6Qs0N->4%nikYh;IGP`Ju@| 
za})uA*Ad3YJ7Af$tLWK!p40D9DHyYUR~p>y)RcyEd$YN(=LW8~I~VR!und~xr+ca5 z+tc^Y*7eSE6l+}F7LLzUE%&(g*q62`Y!!VXRa%&V3O8QPzrIjY{yi@a$Ayg8By#@v0=U$uP?`hNT^ZgP^y( z9RSp#Q_Wx{v}noIRC1rGp-~jrbF@oJ?pHS|#nS%{f7$Snrri3s_*x?2W<=l!}ar}xJ+XqUU32YCAk?vTH2$5i5osV|5rykyZK z^CWfWtuxph$WBXx&#g(kX}AkUUmS&T6u6a6Z+D)|_)^KmH_|*-Q=QtoffsSN{rz9sDwkz8}}L-2T|<(1 zunlVzK`K@H;2iU>jL*+7M{CE?yWq%4gAb(wY27QX(aT-oh(Q_)X)=IF1Zt|TA-Aa{ zv8@zUKf&$^P|NuFYtSw6J0jgW?Q5!f=Uip7^@E-2f%5)Iq_u2Xa6qV4!2eO^)gmQ3 zP_FIdO*Q954oC2;OuL4b=V}I}UHrXy@0p<9H(wq-I^s~Z1lS65fW*cgxlr|2ds-V6 z&v#3<9V)TSwH;Pt6qP3-TSW5j9xhdIj@g%;ol|vU0)6!M%O!>2OvRi!oQb{d*&yR7 ztwgp;1iZN-XFGmrkv7RZTza29{_t+oOUGsg)N_k}x zZ2nuL?FK9zgReH3?p5!0MZ9@$f-7B&EdHj-NIPQIb$)%_;RO(%TRn@YU{v;HU6R0$W^a3_99A&_B#vg7YLRtaZDs{J({W$q zDM<}bviA)tn9I6ZlDY-!(yI>*ypzb!iB{<7TNvlJ5|z4T5Y=@!DQIZfDjl~HxrM3^#*M%J5eTh) zLVNIxl!#>?%)Zfj;kU* z;7Y(hZrp8;3#)B|8Di`vsA;xusFL|?{s!ASO0e@{a%5uNk-dh7Wl4cONQaF+<4TZ(FVI+7JbF z!sa69of4EH;72i`xv7)kz+;pcRF(*tUnhwydXt-I0}2PLIfGn`jfQ)%%`JQMe�g zm0V#LJf6|Ue6Px!I)AoN2cOI&EYB}rhj@0f1{=#jAg){4e0))@OUU5n=7^7L ze#PBe)*+9bB0Gb+OA|R2BJQ~vwI-&9uwPfDU5dIlZU?XVHhWhiRCf1{^Xb#k5+>z` zI-Env@+Ic_PFc)5uFAF^_o;0mTHd5CWNXB<)yC(XQxG^^is;*DmgTx9SpNq+ClKO3!hYyGOzNE1egFHT3vGHA%g*YrPp8$T-W#g@1NM0x zjAAyilj51Ivik?(nqLS;%Fh|N9Ss({+$I(EArUSD9iB%Iv$WGri)Xx`3}DKpORnR3 z$*bW?hw@9$fwHmd-feF{S0?s3(@sj80shV|~ z4(}^;_{x-#CRc=w%uqR}vIBig`~y|55)@HulNzjwue+IT zy0bU)>v9O}#8>|A@yzC&$`%>fkmP8M91d1gj=D$o4_4yeCg#_#8I!U5&00OKc?MN~ zTDj$RYUh~`P<(0&l2uO5oU4Fs*lzs^Sj%3NF;>lz9wM4$h&eQ<2na>%`ezk&9e$Ec z@=SueFhLrH4=*sc|9Srk-&+|uKX}~PiBU3;f7kuV@zk|@f;@H<5EWduK|55fqC7NM zdF$JmGQ9NgU#pMa380(-qJMNpK*t+qWoy90tfWr}<1|t=x@$kUWr73J&;XeSt0(I& zDd&ar-Y>0XPu2W@jST9UdTy#BF(AVZU9t?9kteP5QWP|8wU*6=2$H1t~L z*rL=kv1Q$3X&T#@AAJZJZ!1yIc7?#nsa}&TxlnY~TCD8jZ{QEEaNs(*aADtm`l+u1 zKCk48|917c!xe=S!nayOwlevzKi;+>r+mO(cvL$z)V%mNs{<5=z1XbFQo#0_1G#e< zZRVuaiPd_Z`>DZp z9E|WmLuY<4W*^tE7(e zdB%^I6#pX8Zf`ZdcJL?0%I?&7h|>A?GZfdGb&1#}G5rbSbL!w=ybK{Vh7S8mURq1r zFYkV`(7nMi^Q_Z}zE&7A9%`nWJyDhqEOs?jVP7v;2+I~S6R6{O6Qa0i^r|Mh5ug0k 
z;KXXVx4G4OY->=mWk4h^Hfq+WwSe6!`Cse zV@Us*JT9LIb);WL1QTMBsQ$y zPuH`e95d~;>EE4gS-QwcS~}OjX82h?@=gEwgO0&%PO|aPUEdjiS-uiE&E`gb(9A`p zlY-Y}v(L|_MHq#aCQR{O+`3Qc_tbrSlQ6*}_kOS!d#FRz63OOCf*j2x-?eUddzc!N zA^RHV2T1%CIPoe%b$Aomc0uZuKT`i|*V6P)Z)>}bXT=ie59iVOwz4>?n3M!p>&J~t z8QCgI$9j2s%{q0|Q{s@uV3p%SzC;A&yT_C9t%I;ID&~%EH2dygAIL%EBxX#=5_q}8_RlzX2^g@gbf54$zHVnxDpyLv>5#JLGRW* zX-7BC>v?#8v-|}uhns}$;yky9?`~5MuL~tsvpXNphBb7orRa(RX+PF2(x_r`QS*$a z0}`d2OGs{*Qc_JjmLRA!MWP6jR2RHm2>j)uN!TbLzPmo{1XE5%9kA!|nY?`Miov3c zVQ(Q|N)`r3rOvInl_v@2YC-i!f4N^P2&pDkOW2q5E!|=rhCwm4w8Yis(XwBt3iwO- zgh*lj9-(y5wStZ^7pecPf_G@^W)I9ui2IrS9VgiP|K|@aCq(Oa0}5; zv!?O)eWm@ilB(gT4+|CLVaSj-4OZVSVCO8ElrvKXp;zG0@lzqombf(~Q$goEj3=p2 z=hQsnVI3CRIpQI=WKwjwj@%UaI+W(}CMW)`a2!1~TB)TMS51-6teXV?1oe(|r2FuS zL#y8m&?1X+edbrqhyjtONqxn_{x;B!V(1b+SHo%WF(4{cac?iG%|=d6+tFLi?Wl42 zzHf`}GDxm^y&Aq{Y3Y4iP`+ms7Vv%Fu@`DiU+x#9t0UFR6mImrn(5;M_#|CH%Q(g6 zSC%)Qa6O0X9#8w|XsG27+{IAOIyR8AYg1G+(;i-h!f+x3278%7SUdi_Tys1wlPyX_ z5?suTL0^zb|IwcFr-UI6=l3KYP%UFg1BylIMr?HFb^00EGZ~ELO{T^swZ%uIMh%Gy zX+2Sl(pMNscYaypGOS%^r9r#{0^N|wWT+Jl^D4B;*P3DflVLRmU=(CjXSgAz$=%i< z4ch|bc3XE$`TLLJS^nLstPBR*#uW8RKIbCI(YTn(+Y)&oqgMtxx9>W)NP{vjarH`4 zNj_OG-pwbl7ILb8%cPP`D%8^V4(MkL!|#pSvs_iryn|m+0YkXxw9P5{kT20Za#DY% zb4d<|p=boD-lvbb*l#|3dxcsavbJBlsK&#_L&pt<_nGi1+EG$D6EF_7wuXsRjIr}o zj96^;pA^;N%Z>EMG&bN&$EA~p;f+pI9#ZR4Z!S``A2t@IgjBlKKz%8b$8l8eAQn9= zL^#dMq)vjs=G`-=bKL!90girtjAs3CXf19z1Jsb{T9%Jme9|Nke5LVP;;Rpz%~e+^6o|IPefCful8E14C^b}cR9Z)yZ);si61l(D)=mdR5DrDsZ>21^N*?8v{;vX`)vH21;#vk;4S!=lb{WN6L1i@3~%N3?LiMOPmQvU=XAx zkzKOGtAKQlBN9{v_^=f)D(RU|>d7|Vw#(Er_0K=`+nnbsHzv|_@;>XGzdoREv=-S& zv1~zC=;uwi%?^%h@0>wFs}YS>|?e?`Bdy{J96~poZ^xMmJvIO zxg{Szy51W$xaR0ry&&mBdLgZE*na*NxPHeoNi6DIIKJddZawn`#;@#T#va25kCo?A zR`=qw7J=k@HTieYuIC;tdKZADySYyOqx21n;HmRu5~F31QB$6DYD bool: return False -async def _remember(hass, call, start, response): +async def _remember(hass, call, start, response) -> None: if call.remember: # Find semantic index config config_entry = None @@ -182,7 +185,7 @@ async def 
_remember(hass, call, start, response): ) -async def _update_sensor(hass, sensor_entity, new_value): +async def _update_sensor(hass, sensor_entity, new_value) -> None: """Update the value of a sensor entity.""" if sensor_entity: _LOGGER.info( @@ -222,6 +225,9 @@ def __init__(self, data_call): self.expose_images = data_call.data.get(EXPOSE_IMAGES, False) self.generate_title = data_call.data.get(GENERATE_TITLE, False) self.sensor_entity = data_call.data.get(SENSOR_ENTITY) + # ------------ Added during call ------------ + # self.base64_images : List[str] = [] + # self.filenames : List[str] = [] def get_service_call_data(self): return self diff --git a/custom_components/llmvision/config_flow.py b/custom_components/llmvision/config_flow.py index ef3b6d3..3c33d3d 100644 --- a/custom_components/llmvision/config_flow.py +++ b/custom_components/llmvision/config_flow.py @@ -1,15 +1,21 @@ -from openai import AsyncOpenAI, AsyncAzureOpenAI from homeassistant import config_entries from homeassistant.helpers.selector import selector from homeassistant.exceptions import ServiceValidationError -from homeassistant.helpers.aiohttp_client import async_get_clientsession -from homeassistant.helpers.httpx_client import get_async_client -import urllib.parse +from .providers import ( + OpenAI, + AzureOpenAI, + Anthropic, + Google, + Groq, + LocalAI, + Ollama, +) from .const import ( DOMAIN, CONF_OPENAI_API_KEY, CONF_AZURE_API_KEY, - CONF_AZURE_ENDPOINT, + CONF_AZURE_BASE_URL, + CONF_AZURE_DEPLOYMENT, CONF_AZURE_VERSION, CONF_ANTHROPIC_API_KEY, CONF_GOOGLE_API_KEY, @@ -22,7 +28,6 @@ CONF_OLLAMA_HTTPS, CONF_CUSTOM_OPENAI_API_KEY, CONF_CUSTOM_OPENAI_ENDPOINT, - VERSION_ANTHROPIC, CONF_RETENTION_TIME, ) import voluptuous as vol @@ -31,227 +36,6 @@ _LOGGER = logging.getLogger(__name__) -class Validator: - def __init__(self, hass, user_input): - self.hass = hass - self.user_input = user_input - - async def _validate_api_key(self, api_key): - if not api_key or api_key == "": - _LOGGER.error("You 
need to provide a valid API key.") - raise ServiceValidationError("empty_api_key") - elif self.user_input["provider"] == "OpenAI": - # TODO: Implement OpenAI handshake with OpenAI SDK - client = AsyncOpenAI( - api_key=api_key, - http_client=get_async_client(self.hass), - ) - try: - await client.models.list() - return True - except Exception as e: - _LOGGER.error(f"Could not connect to OpenAI: {e}") - return False - elif self.user_input["provider"] == "Custom OpenAI": - client = AsyncOpenAI( - api_key=api_key, - http_client=get_async_client(self.hass), - endpoint=self.user_input[CONF_CUSTOM_OPENAI_ENDPOINT] - ) - try: - await client.models.list() - return True - except Exception as e: - _LOGGER.error(f"Could not connect to Custom OpenAI: {e}") - return False - elif self.user_input["provider"] == "Azure": - client = AsyncAzureOpenAI( - api_key=api_key, - api_version="2024-10-01-preview", - azure_endpoint="https://llmvision-test.openai.azure.com/", - http_client=get_async_client(self.hass), - ) - try: - await client.models.list() - return True - except Exception as e: - _LOGGER.error(f"Could not connect to Azure: {e}") - return False - elif self.user_input["provider"] == "Anthropic": - header = { - 'x-api-key': api_key, - 'content-type': 'application/json', - 'anthropic-version': VERSION_ANTHROPIC - } - payload = { - "model": "claude-3-haiku-20240307", - "messages": [ - {"role": "user", "content": "Hello, world"} - ], - "max_tokens": 50, - "temperature": 0.5 - } - base_url = "api.anthropic.com" - endpoint = "/v1/messages" - method = "POST" - elif self.user_input["provider"] == "Google": - header = {"content-type": "application/json"} - base_url = "generativelanguage.googleapis.com" - endpoint = f"/v1beta/models/gemini-1.5-flash-latest:generateContent?key={api_key}" - payload = { - "contents": [{ - "parts": [ - {"text": "Hello"} - ]} - ] - } - method = "POST" - elif self.user_input["provider"] == "Groq": - header = { - 'Authorization': 'Bearer ' + api_key, - 
'Content-Type': 'application/json' - } - base_url = "api.groq.com" - endpoint = "/openai/v1/chat/completions" - payload = {"messages": [ - {"role": "user", "content": "Hello"}], "model": "gemma-7b-it"} - method = "POST" - - return await self._handshake(base_url=base_url, endpoint=endpoint, protocol="https", header=header, payload=payload, expected_status=200, method=method) - - def _validate_provider(self): - if not self.user_input["provider"]: - raise ServiceValidationError("empty_mode") - - async def _handshake(self, base_url, endpoint, protocol="http", port="", header={}, payload={}, expected_status=200, method="GET"): - _LOGGER.debug( - f"Connecting to {protocol}://{base_url}{port}{endpoint}") - session = async_get_clientsession(self.hass) - url = f'{protocol}://{base_url}{port}{endpoint}' - try: - if method == "GET": - response = await session.get(url, headers=header) - elif method == "POST": - response = await session.post(url, headers=header, json=payload) - if response.status == expected_status: - return True - else: - _LOGGER.error( - f"Handshake failed with status: {response.status}") - return False - except Exception as e: - _LOGGER.error(f"Could not connect to {url}: {e}") - return False - - async def localai(self): - self._validate_provider() - if not self.user_input[CONF_LOCALAI_IP_ADDRESS]: - raise ServiceValidationError("empty_ip_address") - if not self.user_input[CONF_LOCALAI_PORT]: - raise ServiceValidationError("empty_port") - protocol = "https" if self.user_input[CONF_LOCALAI_HTTPS] else "http" - if not await self._handshake(base_url=self.user_input[CONF_LOCALAI_IP_ADDRESS], port=":"+str(self.user_input[CONF_LOCALAI_PORT]), protocol=protocol, endpoint="/readyz"): - _LOGGER.error("Could not connect to LocalAI server.") - raise ServiceValidationError("handshake_failed") - - async def ollama(self): - self._validate_provider() - if not self.user_input[CONF_OLLAMA_IP_ADDRESS]: - raise ServiceValidationError("empty_ip_address") - if not 
self.user_input[CONF_OLLAMA_PORT]: - raise ServiceValidationError("empty_port") - protocol = "https" if self.user_input[CONF_OLLAMA_HTTPS] else "http" - if not await self._handshake(base_url=self.user_input[CONF_OLLAMA_IP_ADDRESS], port=":"+str(self.user_input[CONF_OLLAMA_PORT]), protocol=protocol, endpoint="/api/tags"): - _LOGGER.error("Could not connect to Ollama server.") - raise ServiceValidationError("handshake_failed") - - async def openai(self): - self._validate_provider() - if not await self._validate_api_key(self.user_input[CONF_OPENAI_API_KEY]): - _LOGGER.error("Could not connect to OpenAI server.") - raise ServiceValidationError("handshake_failed") - - async def azure(self): - self._validate_provider() - if not await self._validate_api_key(self.user_input[CONF_AZURE_API_KEY]): - _LOGGER.error("Could not connect to Azure server.") - raise ServiceValidationError("handshake_failed") - - async def custom_openai(self): - self._validate_provider() - try: - url = urllib.parse.urlparse( - self.user_input[CONF_CUSTOM_OPENAI_ENDPOINT]) - protocol = url.scheme - base_url = url.hostname - path = url.path if url.path else "" - port = ":" + str(url.port) if url.port else "" - - endpoint = path + "/v1/models" - header = {'Content-type': 'application/json', - 'Authorization': 'Bearer ' + self.user_input[CONF_CUSTOM_OPENAI_API_KEY]} if CONF_CUSTOM_OPENAI_API_KEY in self.user_input else {} - except Exception as e: - _LOGGER.error(f"Could not parse endpoint: {e}") - raise ServiceValidationError("endpoint_parse_failed") - - _LOGGER.debug( - f"Connecting to: [protocol: {protocol}, base_url: {base_url}, port: {port}, endpoint: {endpoint}]") - - if not await self._handshake(base_url=base_url, port=port, protocol=protocol, endpoint=endpoint, header=header): - _LOGGER.error("Could not connect to Custom OpenAI server.") - raise ServiceValidationError("handshake_failed") - - async def anthropic(self): - self._validate_provider() - if not await 
self._validate_api_key(self.user_input[CONF_ANTHROPIC_API_KEY]): - _LOGGER.error("Could not connect to Anthropic server.") - raise ServiceValidationError("handshake_failed") - - async def google(self): - self._validate_provider() - if not await self._validate_api_key(self.user_input[CONF_GOOGLE_API_KEY]): - _LOGGER.error("Could not connect to Google server.") - raise ServiceValidationError("handshake_failed") - - async def groq(self): - self._validate_provider() - if not await self._validate_api_key(self.user_input[CONF_GROQ_API_KEY]): - _LOGGER.error("Could not connect to Groq server.") - raise ServiceValidationError("handshake_failed") - - async def semantic_index(self) -> bool: - # check if semantic_index is already configured - for uid in self.hass.data[DOMAIN]: - if 'retention_time' in self.hass.data[DOMAIN][uid]: - return False - return True - - def get_configured_providers(self): - providers = [] - try: - if self.hass.data[DOMAIN] is None: - return providers - except KeyError: - return providers - if CONF_OPENAI_API_KEY in self.hass.data[DOMAIN]: - providers.append("OpenAI") - if CONF_ANTHROPIC_API_KEY in self.hass.data[DOMAIN]: - providers.append("Anthropic") - if CONF_AZURE_API_KEY in self.hass.data[DOMAIN]: - providers.append("Azure") - if CONF_GOOGLE_API_KEY in self.hass.data[DOMAIN]: - providers.append("Google") - if CONF_LOCALAI_IP_ADDRESS in self.hass.data[DOMAIN] and CONF_LOCALAI_PORT in self.hass.data[DOMAIN]: - providers.append("LocalAI") - if CONF_OLLAMA_IP_ADDRESS in self.hass.data[DOMAIN] and CONF_OLLAMA_PORT in self.hass.data[DOMAIN]: - providers.append("Ollama") - if CONF_CUSTOM_OPENAI_ENDPOINT in self.hass.data[DOMAIN]: - providers.append("Custom OpenAI") - if CONF_GROQ_API_KEY in self.hass.data[DOMAIN]: - providers.append("Groq") - return providers - - class llmvisionConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): VERSION = 2 @@ -309,9 +93,13 @@ async def async_step_localai(self, user_input=None): if user_input is not None: # save 
provider to user_input user_input["provider"] = self.init_info["provider"] - validator = Validator(self.hass, user_input) try: - await validator.localai() + localai = LocalAI(self.hass, endpoint={ + 'ip_address': user_input[CONF_LOCALAI_IP_ADDRESS], + 'port': user_input[CONF_LOCALAI_PORT], + 'https': user_input[CONF_LOCALAI_HTTPS] + }) + await localai.validate() # add the mode to user_input return self.async_create_entry(title=f"LocalAI ({user_input[CONF_LOCALAI_IP_ADDRESS]})", data=user_input) except ServiceValidationError as e: @@ -337,9 +125,13 @@ async def async_step_ollama(self, user_input=None): if user_input is not None: # save provider to user_input user_input["provider"] = self.init_info["provider"] - validator = Validator(self.hass, user_input) try: - await validator.ollama() + ollama = Ollama(self.hass, endpoint={ + 'ip_address': user_input[CONF_OLLAMA_IP_ADDRESS], + 'port': user_input[CONF_OLLAMA_PORT], + 'https': user_input[CONF_OLLAMA_HTTPS] + }) + await ollama.validate() # add the mode to user_input return self.async_create_entry(title=f"Ollama ({user_input[CONF_OLLAMA_IP_ADDRESS]})", data=user_input) except ServiceValidationError as e: @@ -363,9 +155,10 @@ async def async_step_openai(self, user_input=None): if user_input is not None: # save provider to user_input user_input["provider"] = self.init_info["provider"] - validator = Validator(self.hass, user_input) try: - await validator.openai() + openai = OpenAI( + self.hass, api_key=user_input[CONF_OPENAI_API_KEY]) + await openai.validate() # add the mode to user_input user_input["provider"] = self.init_info["provider"] return self.async_create_entry(title="OpenAI", data=user_input) @@ -385,16 +178,21 @@ async def async_step_openai(self, user_input=None): async def async_step_azure(self, user_input=None): data_schema = vol.Schema({ vol.Required(CONF_AZURE_API_KEY): str, - vol.Required(CONF_AZURE_ENDPOINT, default="https://domain.openai.azure.com/"): str, + vol.Required(CONF_AZURE_BASE_URL, 
default="https://domain.openai.azure.com/"): str, + vol.Required(CONF_AZURE_DEPLOYMENT, default="deployment"): str, vol.Required(CONF_AZURE_VERSION, default="2024-10-01-preview"): str, }) if user_input is not None: # save provider to user_input user_input["provider"] = self.init_info["provider"] - validator = Validator(self.hass, user_input) try: - await validator.azure() + azure = AzureOpenAI(self.hass, api_key=user_input[CONF_AZURE_API_KEY], endpoint={ + 'base_url': user_input[CONF_AZURE_BASE_URL], + 'deployment': user_input[CONF_AZURE_DEPLOYMENT], + 'api_version': user_input[CONF_AZURE_VERSION] + }) + await azure.validate() # add the mode to user_input user_input["provider"] = self.init_info["provider"] return self.async_create_entry(title="Azure", data=user_input) @@ -419,9 +217,10 @@ async def async_step_anthropic(self, user_input=None): if user_input is not None: # save provider to user_input user_input["provider"] = self.init_info["provider"] - validator = Validator(self.hass, user_input) try: - await validator.anthropic() + anthropic = Anthropic( + self.hass, api_key=user_input[CONF_ANTHROPIC_API_KEY]) + await anthropic.validate() # add the mode to user_input user_input["provider"] = self.init_info["provider"] return self.async_create_entry(title="Anthropic Claude", data=user_input) @@ -446,9 +245,10 @@ async def async_step_google(self, user_input=None): if user_input is not None: # save provider to user_input user_input["provider"] = self.init_info["provider"] - validator = Validator(self.hass, user_input) try: - await validator.google() + google = Google( + self.hass, api_key=user_input[CONF_GOOGLE_API_KEY]) + await google.validate() # add the mode to user_input user_input["provider"] = self.init_info["provider"] return self.async_create_entry(title="Google Gemini", data=user_input) @@ -473,9 +273,9 @@ async def async_step_groq(self, user_input=None): if user_input is not None: # save provider to user_input user_input["provider"] = 
self.init_info["provider"] - validator = Validator(self.hass, user_input) try: - await validator.groq() + groq = Groq(self.hass, api_key=user_input[CONF_GROQ_API_KEY]) + await groq.validate() # add the mode to user_input user_input["provider"] = self.init_info["provider"] return self.async_create_entry(title="Groq", data=user_input) @@ -501,9 +301,11 @@ async def async_step_custom_openai(self, user_input=None): if user_input is not None: # save provider to user_input user_input["provider"] = self.init_info["provider"] - validator = Validator(self.hass, user_input) try: - await validator.custom_openai() + custom_openai = OpenAI(self.hass, api_key=user_input[CONF_CUSTOM_OPENAI_API_KEY], endpoint={ + 'base_url': user_input[CONF_CUSTOM_OPENAI_ENDPOINT] + }) + await custom_openai.validate() # add the mode to user_input user_input["provider"] = self.init_info["provider"] return self.async_create_entry(title="Custom OpenAI compatible Provider", data=user_input) @@ -526,19 +328,12 @@ async def async_step_semantic_index(self, user_input=None): }) if user_input is not None: user_input["provider"] = self.init_info["provider"] - validator = Validator(self.hass, user_input) - try: - if not await validator.semantic_index(): - return self.async_abort(reason="already_configured") + + for uid in self.hass.data[DOMAIN]: + if 'retention_time' in self.hass.data[DOMAIN][uid]: + self.async_abort(reason="already_configured") # add the mode to user_input - return self.async_create_entry(title="LLM Vision Events", data=user_input) - except ServiceValidationError as e: - _LOGGER.error(f"Validation failed: {e}") - return self.async_show_form( - step_id="semantic_index", - data_schema=data_schema, - errors={"base": "handshake_failed"} - ) + return self.async_create_entry(title="LLM Vision Events", data=user_input) return self.async_show_form( step_id="semantic_index", diff --git a/custom_components/llmvision/const.py b/custom_components/llmvision/const.py index 49403d4..f2d4c6b 100644 --- 
a/custom_components/llmvision/const.py +++ b/custom_components/llmvision/const.py @@ -6,7 +6,8 @@ # Configuration values from setup CONF_OPENAI_API_KEY = 'openai_api_key' CONF_AZURE_API_KEY = 'azure_api_key' -CONF_AZURE_ENDPOINT = 'azure_endpoint' +CONF_AZURE_BASE_URL = 'azure_base_url' +CONF_AZURE_DEPLOYMENT = 'azure_deployment' CONF_AZURE_VERSION = 'azure_version' CONF_ANTHROPIC_API_KEY = 'anthropic_api_key' CONF_GOOGLE_API_KEY = 'google_api_key' @@ -57,4 +58,5 @@ ENDPOINT_GOOGLE = "https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={api_key}" ENDPOINT_GROQ = "https://api.groq.com/openai/v1/chat/completions" ENDPOINT_LOCALAI = "{protocol}://{ip_address}:{port}/v1/chat/completions" -ENDPOINT_OLLAMA = "{protocol}://{ip_address}:{port}/api/chat" \ No newline at end of file +ENDPOINT_OLLAMA = "{protocol}://{ip_address}:{port}/api/chat" +ENDPOINT_AZURE = "https://{base_url}/openai/deployments/{deployment}/chat/completions?api-version={api_version}" diff --git a/custom_components/llmvision/manifest.json b/custom_components/llmvision/manifest.json index dc298f5..f7001e4 100644 --- a/custom_components/llmvision/manifest.json +++ b/custom_components/llmvision/manifest.json @@ -6,6 +6,5 @@ "documentation": "https://github.com/valentinfrlch/ha-llmvision", "iot_class": "cloud_polling", "issue_tracker": "https://github.com/valentinfrlch/ha-llmvision/issues", - "requirements": ["openai==1.54.3"], - "version": "1.3.2" + "version": "1.3.5" } \ No newline at end of file diff --git a/custom_components/llmvision/media_handlers.py b/custom_components/llmvision/media_handlers.py index f37ab8c..088b16b 100644 --- a/custom_components/llmvision/media_handlers.py +++ b/custom_components/llmvision/media_handlers.py @@ -135,6 +135,28 @@ async def resize_image(self, target_width, image_path=None, image_data=None, img base64_image = await self._encode_image(img) return base64_image + + async def _fetch(self, url, max_retries=2, retry_delay=1): + """Fetch 
image from url and return image data""" + retries = 0 + while retries < max_retries: + _LOGGER.info( + f"Fetching {url} (attempt {retries + 1}/{max_retries})") + try: + response = await self.session.get(url) + if response.status != 200: + _LOGGER.warning( + f"Couldn't fetch frame (status code: {response.status})") + retries += 1 + await asyncio.sleep(retry_delay) + continue + data = await response.read() + return data + except Exception as e: + _LOGGER.error(f"Fetch failed: {e}") + retries += 1 + await asyncio.sleep(retry_delay) + _LOGGER.warning(f"Failed to fetch {url} after {max_retries} retries") async def record(self, image_entities, duration, max_frames, target_width, include_filename, expose_images): """Wrapper for client.add_frame with integrated recorder @@ -162,7 +184,7 @@ async def record_camera(image_entity, camera_number): frame_url = base_url + \ self.hass.states.get(image_entity).attributes.get( 'entity_picture') - frame_data = await self.client._fetch(frame_url) + frame_data = await self._fetch(frame_url) # Skip frame if fetch failed if not frame_data: @@ -251,7 +273,7 @@ async def add_images(self, image_entities, image_paths, target_width, include_fi image_url = base_url + \ self.hass.states.get(image_entity).attributes.get( 'entity_picture') - image_data = await self.client._fetch(image_url) + image_data = await self._fetch(image_url) # Skip frame if fetch failed if not image_data: @@ -306,7 +328,7 @@ async def add_videos(self, video_paths, event_ids, max_frames, target_width, inc try: base_url = get_url(self.hass) frigate_url = base_url + "/api/frigate/notifications/" + event_id + "/clip.mp4" - clip_data = await self.client._fetch(frigate_url) + clip_data = await self._fetch(frigate_url) if not clip_data: raise ServiceValidationError( diff --git a/custom_components/llmvision/providers.py b/custom_components/llmvision/providers.py index b213d8a..ef82190 100644 --- a/custom_components/llmvision/providers.py +++ 
b/custom_components/llmvision/providers.py @@ -1,16 +1,14 @@ from abc import ABC, abstractmethod -from openai import AsyncOpenAI, AsyncAzureOpenAI from homeassistant.exceptions import ServiceValidationError from homeassistant.helpers.aiohttp_client import async_get_clientsession -from homeassistant.helpers.httpx_client import get_async_client import logging -import asyncio import inspect from .const import ( DOMAIN, CONF_OPENAI_API_KEY, CONF_AZURE_API_KEY, - CONF_AZURE_ENDPOINT, + CONF_AZURE_BASE_URL, + CONF_AZURE_DEPLOYMENT, CONF_AZURE_VERSION, CONF_ANTHROPIC_API_KEY, CONF_GOOGLE_API_KEY, @@ -24,6 +22,7 @@ CONF_CUSTOM_OPENAI_ENDPOINT, CONF_CUSTOM_OPENAI_API_KEY, VERSION_ANTHROPIC, + ENDPOINT_OPENAI, ENDPOINT_ANTHROPIC, ENDPOINT_GOOGLE, ENDPOINT_LOCALAI, @@ -37,58 +36,6 @@ _LOGGER = logging.getLogger(__name__) -def sanitize_data(data): - """Remove long string data from request data to reduce log size""" - if isinstance(data, dict): - return {key: sanitize_data(value) for key, value in data.items()} - elif isinstance(data, list): - return [sanitize_data(item) for item in data] - elif isinstance(data, str) and len(data) > 400 and data.count(' ') < 50: - return '' - else: - return data - - -def get_provider(hass, provider_uid): - """Translate UID of the config entry into provider name""" - if DOMAIN not in hass.data: - return None - - entry_data = hass.data[DOMAIN].get(provider_uid) - if not entry_data: - return None - - if CONF_OPENAI_API_KEY in entry_data: - return "OpenAI" - elif CONF_AZURE_API_KEY in entry_data: - return "Azure" - elif CONF_ANTHROPIC_API_KEY in entry_data: - return "Anthropic" - elif CONF_GOOGLE_API_KEY in entry_data: - return "Google" - elif CONF_GROQ_API_KEY in entry_data: - return "Groq" - elif CONF_LOCALAI_IP_ADDRESS in entry_data: - return "LocalAI" - elif CONF_OLLAMA_IP_ADDRESS in entry_data: - return "Ollama" - elif CONF_CUSTOM_OPENAI_API_KEY in entry_data: - return "Custom OpenAI" - - return None - - -def default_model(provider): return { 
- "OpenAI": "gpt-4o-mini", - "Anthropic": "claude-3-5-sonnet-latest", - "Google": "gemini-1.5-flash-latest", - "Groq": "llava-v1.5-7b-4096-preview", - "LocalAI": "gpt-4-vision-preview", - "Ollama": "llava-phi3:latest", - "Custom OpenAI": "gpt-4o-mini" -}.get(provider, "gpt-4o-mini") # Default value - - class Request: def __init__(self, hass, message, max_tokens, temperature): self.session = async_get_clientsession(hass) @@ -99,87 +46,177 @@ def __init__(self, hass, message, max_tokens, temperature): self.base64_images = [] self.filenames = [] + @staticmethod + def sanitize_data(data): + """Remove long string data from request data to reduce log size""" + if isinstance(data, dict): + return {key: Request.sanitize_data(value) for key, value in data.items()} + elif isinstance(data, list): + return [Request.sanitize_data(item) for item in data] + elif isinstance(data, str) and len(data) > 400 and data.count(' ') < 50: + return '' + else: + return data + + @staticmethod + def get_provider(hass, provider_uid): + """Translate UID of the config entry into provider name""" + if DOMAIN not in hass.data: + return None + + entry_data = hass.data[DOMAIN].get(provider_uid) + if not entry_data: + return None + + if CONF_ANTHROPIC_API_KEY in entry_data: + return "Anthropic" + elif CONF_AZURE_API_KEY in entry_data: + return "Azure" + elif CONF_CUSTOM_OPENAI_API_KEY in entry_data: + return "Custom OpenAI" + elif CONF_GOOGLE_API_KEY in entry_data: + return "Google" + elif CONF_GROQ_API_KEY in entry_data: + return "Groq" + elif CONF_LOCALAI_IP_ADDRESS in entry_data: + return "LocalAI" + elif CONF_OLLAMA_IP_ADDRESS in entry_data: + return "Ollama" + elif CONF_OPENAI_API_KEY in entry_data: + return "OpenAI" + + return None + + def validate(self, call): + """Validate call data""" + # Check image input + if not call.base64_images: + raise ServiceValidationError(ERROR_NO_IMAGE_INPUT) + # Check if single image is provided for Groq + if len(call.base64_images) > 1 and 
self.get_provider(call.provider) == 'Groq': + raise ServiceValidationError(ERROR_GROQ_MULTIPLE_IMAGES) + # Check provider is configured + if not call.provider: + raise ServiceValidationError(ERROR_NOT_CONFIGURED) + + @staticmethod + def default_model(provider): return { + "Anthropic": "claude-3-5-sonnet-latest", + "Azure": "gpt-4o-mini", + "Custom OpenAI": "gpt-4o-mini", + "Google": "gemini-1.5-flash-latest", + "Groq": "llava-v1.5-7b-4096-preview", + "LocalAI": "gpt-4-vision-preview", + "Ollama": "llava-phi3:latest", + "OpenAI": "gpt-4o-mini" + }.get(provider, "gpt-4o-mini") # Default value + async def call(self, call): entry_id = call.provider - provider = get_provider(self.hass, entry_id) - _LOGGER.info(f"Provider from call: {provider}") - model = call.model if call.model != "None" else default_model(provider) - gen_title_prompt = "Your job is to generate a title in the form ' seen' for texts. Do not mention the time, do not speculate. Generate a title for this text: {response}" + config = self.hass.data.get(DOMAIN).get(entry_id) - config = self.hass.data.get(DOMAIN).get(entry_id) + provider = Request.get_provider(self.hass, entry_id) + call.model = call.model if call.model and call.model != 'None' else Request.default_model( + provider) + call.base64_images = self.base64_images + call.filenames = self.filenames + + gen_title_prompt = "Your job is to generate a title in the form ' seen' for texts. Do not mention the time, do not speculate. 
Generate a title for this text: {response}" if provider == 'OpenAI': api_key = config.get(CONF_OPENAI_API_KEY) - request = OpenAI(self.hass, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames) - response_text = await request.vision_request(api_key=api_key) + openai = OpenAI(hass=self.hass, api_key=api_key) + response_text = await openai.vision_request(call) if call.generate_title: - gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) - if provider == 'Azure': + call.message = gen_title_prompt.format(response=response_text) + gen_title = await openai.title_request(call) + + elif provider == 'Azure': api_key = config.get(CONF_AZURE_API_KEY) - endpoint = config.get(CONF_AZURE_ENDPOINT) + base_url = config.get(CONF_AZURE_BASE_URL) + deployment = config.get(CONF_AZURE_DEPLOYMENT) version = config.get(CONF_AZURE_VERSION) - request = AzureOpenAI(self.hass, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames) - response_text = await request.vision_request(api_key=api_key, endpoint=endpoint, version=version) + azure = AzureOpenAI(self.hass, + api_key=api_key, + endpoint={ + 'base_url': base_url, + 'deployment': deployment, + 'api_version': version + }) + response_text = await azure.vision_request(call) if call.generate_title: - gen_title = await request.text_request(api_key=api_key, endpoint=endpoint, version=version, prompt=gen_title_prompt.format(response=response_text)) + call.message = gen_title_prompt.format(response=response_text) + gen_title = await azure.title_request(call) + elif provider == 'Anthropic': api_key = config.get(CONF_ANTHROPIC_API_KEY) - request = Anthropic(self.hass, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames) - response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_ANTHROPIC) + anthropic = Anthropic(self.hass, api_key=api_key) + 
response_text = await anthropic.vision_request(call) if call.generate_title: - gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) + call.message = gen_title_prompt.format(response=response_text) + gen_title = await anthropic.title_request(call) + elif provider == 'Google': api_key = config.get(CONF_GOOGLE_API_KEY) - request = Google(self.hass, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames) - response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GOOGLE) + google = Google(self.hass, api_key=api_key, endpoint={ + 'base_url': ENDPOINT_GOOGLE, 'model': call.model + }) + response_text = await google.vision_request(call) if call.generate_title: - gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) + call.message = gen_title_prompt.format(response=response_text) + gen_title = await google.title_request(call) + elif provider == 'Groq': api_key = config.get(CONF_GROQ_API_KEY) - request = Groq(self.hass, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames) - response_text = await request.vision_request(api_key=api_key, endpoint=ENDPOINT_GROQ) + groq = Groq(self.hass, api_key=api_key) + response_text = await groq.vision_request(call) if call.generate_title: - gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) + gen_title = await groq.title_request(call) + elif provider == 'LocalAI': ip_address = config.get(CONF_LOCALAI_IP_ADDRESS) port = config.get(CONF_LOCALAI_PORT) https = config.get(CONF_LOCALAI_HTTPS, False) - request = LocalAI(self.hass, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames) - response_text = await request.vision_request(endpoint=ENDPOINT_LOCALAI, ip_address=ip_address, port=port, https=https) + localai = LocalAI(self.hass, 
endpoint={ + 'ip_address': ip_address, + 'port': port, + 'https': https + }) + response_text = await localai.vision_request(call) if call.generate_title: - gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) + call.message = gen_title_prompt.format(response=response_text) + gen_title = await localai.title_request(call) elif provider == 'Ollama': ip_address = config.get(CONF_OLLAMA_IP_ADDRESS) port = config.get(CONF_OLLAMA_PORT) https = config.get(CONF_OLLAMA_HTTPS, False) - request = Ollama(self.hass, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames) - response_text = await request.vision_request(endpoint=ENDPOINT_OLLAMA, ip_address=ip_address, port=port, https=https) + ollama = Ollama(self.hass, endpoint={ + 'ip_address': ip_address, + 'port': port, + 'https': https + }) + response_text = await ollama.vision_request(call) if call.generate_title: - gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) + call.message = gen_title_prompt.format(response=response_text) + gen_title = await ollama.title_request(call) elif provider == 'Custom OpenAI': api_key = config.get(CONF_CUSTOM_OPENAI_API_KEY, "") endpoint = config.get( CONF_CUSTOM_OPENAI_ENDPOINT) + "/v1/chat/completions" - request = OpenAI(self.hass, model, self.max_tokens, self.temperature, - self.message, self.base64_images, self.filenames) - response_text = await request.vision_request(api_key, endpoint) + custom_openai = OpenAI( + self.hass, api_key=api_key, endpoint=endpoint) + response_text = await custom_openai.vision_request(api_key, endpoint) if call.generate_title: - gen_title = await request.text_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) + call.message = gen_title_prompt.format(response=response_text) + gen_title = await custom_openai.title_request(api_key=api_key, 
prompt=gen_title_prompt.format(response=response_text)) else: raise ServiceValidationError("invalid_provider") @@ -212,50 +249,52 @@ async def _resolve_error(self, response, provider): class Provider(ABC): - def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): + def __init__(self, + hass, + api_key="", + endpoint={ + 'base_url': "", + 'deployment': "", + 'api_version': "", + 'ip_address': "", + 'port': "", + 'https': False + } + ): self.hass = hass self.session = async_get_clientsession(hass) - self.model = model - self.max_tokens = max_tokens - self.temperature = temperature - self.message = message - self.base64_images = base64_images - self.filenames = filenames + self.api_key = api_key + self.endpoint = endpoint @abstractmethod - async def _make_request(self, **kwargs) -> str: + async def _make_request(self, data) -> str: pass @abstractmethod - def validate(self) -> bool: + def _prepare_vision_data(self, call) -> dict: pass - def validate_images(self): - if not self.base64_images or len(self.base64_images) == 0: - raise ServiceValidationError(ERROR_NO_IMAGE_INPUT) + @abstractmethod + def _prepare_text_data(self, call) -> dict: + pass + + @abstractmethod + async def validate(self) -> None | ServiceValidationError: + pass + + async def vision_request(self, call) -> str: + data = self._prepare_vision_data(call) + return await self._make_request(data) - async def vision_request(self, **kwargs) -> str: - self.validate_images() - self.validate(**kwargs) - if "prompt" in kwargs: - self.message = kwargs.get("prompt") - kwargs["data"] = self._prepare_vision_data() - return await self._make_request(**kwargs) - - async def text_request(self, **kwargs) -> str: - _LOGGER.info(f"Request received: {kwargs.get('prompt')}") - self.validate_images() - self.validate(**kwargs) - if "prompt" in kwargs: - self.message = kwargs.get("prompt") - self.temperature = 0.1 - self.max_tokens = 3 - kwargs["data"] = self._prepare_text_data() - return 
await self._make_request(**kwargs) + async def title_request(self, call) -> str: + call.temperature = 0.1 + call.max_tokens = 3 + data = self._prepare_text_data(call) + return await self._make_request(data) async def _post(self, url, headers, data) -> dict: """Post data to url and return response data""" - _LOGGER.info(f"Request data: {sanitize_data(data)}") + _LOGGER.info(f"Request data: {Request.sanitize_data(data)}") try: _LOGGER.info(f"Posting to {url} with headers {headers}") @@ -273,28 +312,6 @@ async def _post(self, url, headers, data) -> dict: _LOGGER.info(f"Response data: {response_data}") return response_data - async def _fetch(self, url, max_retries=2, retry_delay=1): - """Fetch image from url and return image data""" - retries = 0 - while retries < max_retries: - _LOGGER.info( - f"Fetching {url} (attempt {retries + 1}/{max_retries})") - try: - response = await self.session.get(url) - if response.status != 200: - _LOGGER.warning( - f"Couldn't fetch frame (status code: {response.status})") - retries += 1 - await asyncio.sleep(retry_delay) - continue - data = await response.read() - return data - except Exception as e: - _LOGGER.error(f"Fetch failed: {e}") - retries += 1 - await asyncio.sleep(retry_delay) - _LOGGER.warning(f"Failed to fetch {url} after {max_retries} retries") - async def _resolve_error(self, response, provider) -> str: """Translate response status to error message""" import json @@ -318,124 +335,155 @@ async def _resolve_error(self, response, provider) -> str: class OpenAI(Provider): - def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(hass, model, max_tokens, temperature, - message, base64_images, filenames) - - async def _make_request(self, **kwargs) -> str: - client = AsyncOpenAI( - api_key=kwargs.get("api_key"), - http_client=get_async_client(self.hass), - ) + def __init__(self, hass, api_key="", endpoint={'base_url': ENDPOINT_OPENAI}): + super().__init__(hass, api_key, 
endpoint=endpoint) + + def _generate_headers(self) -> dict: + return {'Content-type': 'application/json', + 'Authorization': 'Bearer ' + self.api_key} - messages = kwargs.get("data") + async def _make_request(self, data) -> str: + headers = self._generate_headers() - response = await client.chat.completions.create( - model=self.model, - messages=messages, - max_tokens=self.max_tokens, - temperature=self.temperature, - ) + response = await self._post(url=self.endpoint.get('base_url'), headers=headers, data=data) - response_text = response.choices[0].message.content + response_text = response.get( + "choices")[0].get("message").get("content") return response_text - def _prepare_vision_data(self) -> list: - messages = [{"role": "user", "content": []}] - for image, filename in zip(self.base64_images, self.filenames): - tag = ("Image " + str(self.base64_images.index(image) + 1) + def _prepare_vision_data(self, call) -> list: + payload = {"model": call.model, + "messages": [{"role": "user", "content": []}], + "max_tokens": call.max_tokens, + "temperature": call.temperature + } + + for image, filename in zip(call.base64_images, call.filenames): + tag = ("Image " + str(call.base64_images.index(image) + 1) ) if filename == "" else filename - messages[0]["content"].append({"type": "text", "text": tag + ":"}) - messages[0]["content"].append({"type": "image_url", "image_url": { - "url": f"data:image/jpeg;base64,{image}"}}) - messages[0]["content"].append({"type": "text", "text": self.message}) - return messages + payload["messages"][0]["content"].append( + {"type": "text", "text": tag + ":"}) + payload["messages"][0]["content"].append({"type": "image_url", "image_url": { + "url": f"data:image/jpeg;base64,{image}"}}) + payload["messages"][0]["content"].append( + {"type": "text", "text": call.message}) + return payload - def _prepare_text_data(self) -> list: - return [{"role": "user", "content": [{"type": "text", "text": self.message}]}] + def _prepare_text_data(self, call) -> 
list: + return { + "model": call.model, + "messages": [{"role": "user", "content": [{"type": "text", "text": call.message}]}], + "max_tokens": call.max_tokens, + "temperature": call.temperature + } - def validate(self, **kwargs): - if not kwargs.get("api_key"): - raise ServiceValidationError( - ERROR_NOT_CONFIGURED.format(provider="OpenAI")) + async def validate(self) -> None | ServiceValidationError: + if self.api_key: + headers = self._generate_headers() + data = { + "model": "gpt-4o-mini", + "messages": [{"role": "user", "content": [{"type": "text", "text": "Hi"}]}], + "max_tokens": 1, + "temperature": 0.5 + } + await self._post(url=self.endpoint.get('base_url'), headers=headers, data=data) + else: + raise ServiceValidationError("empty_api_key") class AzureOpenAI(Provider): - def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(hass, model, max_tokens, temperature, - message, base64_images, filenames) - - async def _make_request(self, **kwargs) -> str: - client = AsyncAzureOpenAI( - api_key=kwargs.get("api_key"), - azure_endpoint=kwargs.get("endpoint"), - api_version=kwargs.get("version"), - http_client=get_async_client(self.hass), - ) + def __init__(self, hass, api_key="", endpoint={'base_url': "", 'deployment': "", 'api_version': ""}): + super().__init__(hass, api_key, endpoint) - messages = kwargs.get("data") + def _generate_headers(self) -> dict: + return {'Content-type': 'application/json', + 'api-key': self.api_key} - response = await client.chat.completions.create( - model=self.model, - messages=messages, - max_tokens=self.max_tokens, - temperature=self.temperature, + async def _make_request(self, data) -> str: + headers = self._generate_headers() + endpoint = self.endpoint.get("endpoint").format( + base_url=self.endpoint.get("base_url"), + deployment=self.endpoint.get("deployment"), + api_version=self.endpoint.get("api_version") ) - response_text = response.choices[0].message.content + response = 
await self._post( + url=endpoint, headers=headers, data=data) + + response_text = response.get( + "choices")[0].get("message").get("content") return response_text - def _prepare_vision_data(self) -> list: - messages = [{"role": "user", "content": []}] - for image, filename in zip(self.base64_images, self.filenames): - tag = ("Image " + str(self.base64_images.index(image) + 1) + def _prepare_vision_data(self, call) -> list: + payload = {"messages": [{"role": "user", "content": []}], + "max_tokens": call.max_tokens, + "temperature": call.temperature, + "stream": False + } + for image, filename in zip(call.base64_images, call.filenames): + tag = ("Image " + str(call.base64_images.index(image) + 1) ) if filename == "" else filename - messages[0]["content"].append({"type": "text", "text": tag + ":"}) - messages[0]["content"].append({"type": "image_url", "image_url": { - "url": f"data:image/jpeg;base64,{image}"}}) - messages[0]["content"].append({"type": "text", "text": self.message}) - return messages + payload["messages"][0]["content"].append( + {"type": "text", "text": tag + ":"}) + payload["messages"][0]["content"].append({"type": "image_url", "image_url": { + "url": f"data:image/jpeg;base64,{image}"}}) + payload["messages"][0]["content"].append( + {"type": "text", "text": call.message}) + return payload["messages"] + + def _prepare_text_data(self, call) -> list: + return {"messages": [{"role": "user", "content": [{"type": "text", "text": call.message}]}], + "max_tokens": call.max_tokens, + "temperature": call.temperature, + "stream": False + } - def _prepare_text_data(self) -> list: - return [{"role": "user", "content": [{"type": "text", "text": self.message}]}] + async def validate(self) -> None | ServiceValidationError: + if not self.api_key: + raise ServiceValidationError("empty_api_key") - def validate(self, **kwargs): - if not kwargs.get("api_key"): - raise ServiceValidationError( - ERROR_NOT_CONFIGURED.format(provider="Azure")) + endpoint = 
self.endpoint.get("endpoint").format( + base_url=self.endpoint.get("base_url"), + deployment=self.endpoint.get("deployment"), + api_version=self.endpoint.get("api_version") + ) + headers = self._generate_headers() + data = {"messages": [{"role": "user", "content": [{"type": "text", "text": "Hi"}]}], + "max_tokens": 1, + "temperature": 0.5, + "stream": False + } + await self._post(url=endpoint, headers=headers, data=data) class Anthropic(Provider): - def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(hass, model, max_tokens, temperature, - message, base64_images, filenames) + def __init__(self, hass, api_key=""): + super().__init__(hass, api_key) - def _generate_headers(self, api_key: str) -> dict: + def _generate_headers(self) -> dict: return { 'content-type': 'application/json', - 'x-api-key': api_key, + 'x-api-key': self.api_key, 'anthropic-version': VERSION_ANTHROPIC } - async def _make_request(self, **kwargs) -> str: - api_key = kwargs.get("api_key") - endpoint = kwargs.get("endpoint") - data = kwargs.get("data") + async def _make_request(self, data) -> str: + api_key = self.api_key - headers = self._generate_headers(api_key) - response = await self._post(url=endpoint, headers=headers, data=data) + headers = self._generate_headers() + response = await self._post(url=ENDPOINT_ANTHROPIC, headers=headers, data=data) response_text = response.get("content")[0].get("text") return response_text - def _prepare_vision_data(self) -> dict: + def _prepare_vision_data(self, call) -> dict: data = { - "model": self.model, + "model": call.model, "messages": [{"role": "user", "content": []}], - "max_tokens": self.max_tokens, - "temperature": self.temperature + "max_tokens": call.max_tokens, + "temperature": call.temperature } - for image, filename in zip(self.base64_images, self.filenames): - tag = ("Image " + str(self.base64_images.index(image) + 1) + for image, filename in zip(call.base64_images, call.filenames): + 
tag = ("Image " + str(call.base64_images.index(image) + 1) ) if filename == "" else filename data["messages"][0]["content"].append( {"type": "text", "text": tag + ":"}) @@ -445,210 +493,247 @@ def _prepare_vision_data(self) -> dict: {"type": "text", "text": self.message}) return data - def _prepare_text_data(self) -> dict: + def _prepare_text_data(self, call) -> dict: return { - "model": self.model, - "messages": [{"role": "user", "content": [{"type": "text", "text": self.message}]}], - "max_tokens": self.max_tokens, - "temperature": self.temperature + "model": call.model, + "messages": [{"role": "user", "content": [{"type": "text", "text": call.message}]}], + "max_tokens": call.max_tokens, + "temperature": call.temperature } - def validate(self, **kwargs): - if not kwargs.get("api_key"): - raise ServiceValidationError( - ERROR_NOT_CONFIGURED.format(provider="Anthropic")) + async def validate(self) -> None | ServiceValidationError: + if not self.api_key: + raise ServiceValidationError("empty_api_key") + + header = self._generate_headers() + payload = { + "model": "claude-3-haiku-20240307", + "messages": [ + {"role": "user", "content": "Hi"} + ], + "max_tokens": 1, + "temperature": 0.5 + } + await self._post(url=f"https://api.anthropic.com/v1/messages", headers=header, data=payload) class Google(Provider): - def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(hass, model, max_tokens, temperature, - message, base64_images, filenames) + def __init__(self, hass, api_key="", endpoint={'base_url': ENDPOINT_GOOGLE, 'model': "gemini-1.5-flash-latest"}): + super().__init__(hass, api_key, endpoint) def _generate_headers(self) -> dict: return {'content-type': 'application/json'} - async def _make_request(self, **kwargs) -> str: - api_key = kwargs.get("api_key") - endpoint = kwargs.get("endpoint") - data = kwargs.get("data") + async def _make_request(self, data) -> str: + endpoint = 
self.endpoint.get('base_url').format( + model=self.endpoint.get('model'), api_key=self.api_key) headers = self._generate_headers() - response = await self._post(url=endpoint.format(model=self.model, api_key=api_key), headers=headers, data=data) + response = await self._post(url=endpoint, headers=headers, data=data) response_text = response.get("candidates")[0].get( "content").get("parts")[0].get("text") return response_text - def _prepare_vision_data(self) -> dict: + def _prepare_vision_data(self, call) -> dict: data = {"contents": [], "generationConfig": { - "maxOutputTokens": self.max_tokens, "temperature": self.temperature}} - for image, filename in zip(self.base64_images, self.filenames): - tag = ("Image " + str(self.base64_images.index(image) + 1) + "maxOutputTokens": call.max_tokens, "temperature": call.temperature}} + for image, filename in zip(call.base64_images, call.filenames): + tag = ("Image " + str(call.base64_images.index(image) + 1) ) if filename == "" else filename data["contents"].append({"role": "user", "parts": [ {"text": tag + ":"}, {"inline_data": {"mime_type": "image/jpeg", "data": image}}]}) data["contents"].append( - {"role": "user", "parts": [{"text": self.message}]}) + {"role": "user", "parts": [{"text": call.message}]}) return data - def _prepare_text_data(self) -> dict: + def _prepare_text_data(self, call) -> dict: return { - "contents": [{"role": "user", "parts": [{"text": self.message + ":"}]}], - "generationConfig": {"maxOutputTokens": self.max_tokens, "temperature": self.temperature} + "contents": [{"role": "user", "parts": [{"text": call.message + ":"}]}], + "generationConfig": {"maxOutputTokens": call.max_tokens, "temperature": call.temperature} } - def validate(self, **kwargs): - if not kwargs.get("api_key"): - raise ServiceValidationError( - ERROR_NOT_CONFIGURED.format(provider="Google")) + async def validate(self) -> None | ServiceValidationError: + if not self.api_key: + raise ServiceValidationError("empty_api_key") + + headers 
= self._generate_headers() + data = { + "contents": [{"role": "user", "parts": [{"text": "Hi"}]}], + "generationConfig": {"maxOutputTokens": 1, "temperature": 0.5} + } + await self._post(url=self.endpoint.get('base_url').format(model=self.endpoint.get('model'), api_key=self.api_key), headers=headers, data=data) class Groq(Provider): - def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(hass, model, max_tokens, temperature, - message, base64_images, filenames) + def __init__(self, hass, api_key=""): + super().__init__(hass, api_key) - def _generate_headers(self, api_key: str) -> dict: - return {'Content-type': 'application/json', 'Authorization': 'Bearer ' + api_key} + def _generate_headers(self) -> dict: + return {'Content-type': 'application/json', 'Authorization': 'Bearer ' + self.api_key} - async def _make_request(self, **kwargs) -> str: - api_key = kwargs.get("api_key") - endpoint = kwargs.get("endpoint") - data = kwargs.get("data") + async def _make_request(self, data) -> str: + api_key = self.api_key - headers = self._generate_headers(api_key) - response = await self._post(url=endpoint, headers=headers, data=data) + headers = self._generate_headers() + response = await self._post(url=ENDPOINT_GROQ, headers=headers, data=data) response_text = response.get( "choices")[0].get("message").get("content") return response_text - def _prepare_vision_data(self) -> dict: - first_image = self.base64_images[0] + def _prepare_vision_data(self, call) -> dict: + first_image = call.base64_images[0] data = { "messages": [ { "role": "user", "content": [ - {"type": "text", "text": self.message}, + {"type": "text", "text": call.message}, {"type": "image_url", "image_url": { "url": f"data:image/jpeg;base64,{first_image}"}} ] } ], - "model": self.model + "model": call.model } return data - def _prepare_text_data(self) -> dict: + def _prepare_text_data(self, call) -> dict: return { "messages": [ { "role": "user", "content": 
[ - {"type": "text", "text": self.message} + {"type": "text", "text": call.message} ] } ], - "model": self.model + "model": call.model } - def validate(self, **kwargs): - if not kwargs.get("api_key"): - raise ServiceValidationError( - ERROR_NOT_CONFIGURED.format(provider="Groq")) - if len(kwargs.get("base64_images")) > 1: - raise ServiceValidationError(ERROR_GROQ_MULTIPLE_IMAGES) + async def validate(self) -> None | ServiceValidationError: + if not self.api_key: + raise ServiceValidationError("empty_api_key") + headers = self._generate_headers() + data = { + "contents": [{ + "parts": [ + {"text": "Hello"} + ]} + ] + } + await self._post(url=ENDPOINT_GROQ, headers=headers, data=data) class LocalAI(Provider): - def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(hass, model, max_tokens, temperature, - message, base64_images, filenames) - - async def _make_request(self, **kwargs) -> str: - endpoint = kwargs.get("endpoint") - data = kwargs.get("data") - https = kwargs.get("https") - ip_address = kwargs.get("ip_address") - port = kwargs.get("port") + def __init__(self, hass, api_key="", endpoint={'ip_address': "", 'port': "", 'https': False}): + super().__init__(hass, api_key, endpoint) + + async def _make_request(self, data) -> str: + endpoint = ENDPOINT_LOCALAI.format( + protocol="https" if self.endpoint.get("https") else "http", + ip_address=self.endpoint.get("ip_address"), + port=self.endpoint.get("port") + ) headers = self._generate_headers() - protocol = "https" if https else "http" - response = await self._post(url=endpoint.format(ip_address=ip_address, port=port, protocol=protocol), headers=headers, data=data) + response = await self._post(url=endpoint, headers=headers, data=data) response_text = response.get( "choices")[0].get("message").get("content") return response_text - def _prepare_vision_data(self) -> dict: - data = {"model": self.model, "messages": [{"role": "user", "content": [ - ]}], 
"max_tokens": self.max_tokens, "temperature": self.temperature} - for image, filename in zip(self.base64_images, self.filenames): - tag = ("Image " + str(self.base64_images.index(image) + 1) + def _prepare_vision_data(self, call) -> dict: + data = {"model": call.model, "messages": [{"role": "user", "content": [ + ]}], "max_tokens": call.max_tokens, "temperature": call.temperature} + for image, filename in zip(call.base64_images, call.filenames): + tag = ("Image " + str(call.base64_images.index(image) + 1) ) if filename == "" else filename data["messages"][0]["content"].append( {"type": "text", "text": tag + ":"}) data["messages"][0]["content"].append( {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image}"}}) data["messages"][0]["content"].append( - {"type": "text", "text": self.message}) + {"type": "text", "text": call.message}) return data - def _prepare_text_data(self) -> dict: + def _prepare_text_data(self, call) -> dict: return { - "model": self.model, - "messages": [{"role": "user", "content": [{"type": "text", "text": self.message}]}], - "max_tokens": self.max_tokens, - "temperature": self.temperature + "model": call.model, + "messages": [{"role": "user", "content": [{"type": "text", "text": call.message}]}], + "max_tokens": call.max_tokens, + "temperature": call.temperature } - def validate(self, **kwargs): - if not kwargs.get("ip_address") or not kwargs.get("port"): - raise ServiceValidationError( - ERROR_NOT_CONFIGURED.format(provider="LocalAI")) + async def validate(self) -> None | ServiceValidationError: + if not self.endpoint.get("ip_address") or not self.endpoint.get("port"): + raise ServiceValidationError('handshake_failed') + session = async_get_clientsession(self.hass) + ip_address = self.endpoint.get("ip_address") + port = self.endpoint.get("port") + protocol = "https" if self.endpoint.get("https") else "http" + + try: + response = await session.get(f"{protocol}://{ip_address}:{port}/readyz") + if response.status != 200: + 
raise ServiceValidationError('handshake_failed') + except Exception: + raise ServiceValidationError('handshake_failed') class Ollama(Provider): - def __init__(self, hass, model, max_tokens, temperature, message, base64_images, filenames): - super().__init__(hass, model, max_tokens, temperature, - message, base64_images, filenames) + def __init__(self, hass, api_key="", endpoint={'ip_address': "0.0.0.0", 'port': "11434", 'https': False}): + super().__init__(hass, api_key, endpoint) - async def _make_request(self, **kwargs) -> str: - endpoint = kwargs.get("endpoint") - data = kwargs.get("data") - https = kwargs.get("https") - ip_address = kwargs.get("ip_address") - port = kwargs.get("port") + async def _make_request(self, data) -> str: + https = self.endpoint.get("https") + ip_address = self.endpoint.get("ip_address") + port = self.endpoint.get("port") + protocol = "https" if https else "http" + + endpoint = ENDPOINT_OLLAMA.format( + ip_address=ip_address, + port=port, + protocol=protocol + ) _LOGGER.info( f"endpoint: {endpoint} https: {https} ip_address: {ip_address} port: {port}") - headers = {} - protocol = "https" if https else "http" - response = await self._post(url=endpoint.format(ip_address=ip_address, port=port, protocol=protocol), headers=headers, data=data) + response = await self._post(url=endpoint, headers={}, data=data) response_text = response.get("message").get("content") return response_text - def _prepare_vision_data(self) -> dict: - data = {"model": self.model, "messages": [], "stream": False, "options": { - "num_predict": self.max_tokens, "temperature": self.temperature}} - for image, filename in zip(self.base64_images, self.filenames): - tag = ("Image " + str(self.base64_images.index(image) + 1) + def _prepare_vision_data(self, call) -> dict: + data = {"model": call.model, "messages": [], "stream": False, "options": { + "num_predict": call.max_tokens, "temperature": call.temperature}} + for image, filename in zip(call.base64_images, 
call.filenames): + tag = ("Image " + str(call.base64_images.index(image) + 1) ) if filename == "" else filename image_message = {"role": "user", "content": tag + ":", "images": [image]} data["messages"].append(image_message) - prompt_message = {"role": "user", "content": self.message} + prompt_message = {"role": "user", "content": call.message} data["messages"].append(prompt_message) return data - def _prepare_text_data(self) -> dict: + def _prepare_text_data(self, call) -> dict: return { - "model": self.model, - "messages": [{"role": "user", "content": self.message}], + "model": call.model, + "messages": [{"role": "user", "content": call.message}], "stream": False, - "options": {"num_predict": self.max_tokens, "temperature": self.temperature} + "options": {"num_predict": call.max_tokens, "temperature": call.temperature} } - def validate(self, **kwargs): - if not kwargs.get("ip_address") or not kwargs.get("port"): - raise ServiceValidationError( - ERROR_NOT_CONFIGURED.format(provider="OpenAI")) + async def validate(self) -> None | ServiceValidationError: + if not self.endpoint.get("ip_address") or not self.endpoint.get("port"): + raise ServiceValidationError('handshake_failed') + session = async_get_clientsession(self.hass) + ip_address = self.endpoint.get("ip_address") + port = self.endpoint.get("port") + protocol = "https" if self.endpoint.get("https") else "http" + + try: + response = await session.get(f"{protocol}://{ip_address}:{port}/api/tags") + if response.status != 200: + raise ServiceValidationError('handshake_failed') + except Exception: + raise ServiceValidationError('handshake_failed') From 147821b04332c8339dfffb1c6db51a276b9634e0 Mon Sep 17 00:00:00 2001 From: valentinfrlch Date: Sun, 22 Dec 2024 17:38:36 +0100 Subject: [PATCH 7/9] Fixed #90 --- custom_components/llmvision/__init__.py | 74 ++++++++++++------- custom_components/llmvision/media_handlers.py | 2 + custom_components/llmvision/providers.py | 70 +++++++++++------- 3 files changed, 93
insertions(+), 53 deletions(-) diff --git a/custom_components/llmvision/__init__.py b/custom_components/llmvision/__init__.py index 0333714..cf9345b 100644 --- a/custom_components/llmvision/__init__.py +++ b/custom_components/llmvision/__init__.py @@ -174,7 +174,8 @@ async def _remember(hass, call, start, response) -> None: else: camera_name = "Unknown" - camera_name = camera_name.replace("camera.", "").replace("image.", "").capitalize() + camera_name = camera_name.replace( + "camera.", "").replace("image.", "").capitalize() await semantic_index.remember( start=start, @@ -185,13 +186,37 @@ async def _remember(hass, call, start, response) -> None: ) -async def _update_sensor(hass, sensor_entity, new_value) -> None: +async def _update_sensor(hass, sensor_entity: str, new_value: str | int, type: str) -> None: """Update the value of a sensor entity.""" - if sensor_entity: + # Attempt to parse the response + if type == "boolean" and new_value.lower() not in ["on", "off"]: + if new_value.lower() in ["true", "false"]: + new_value = "on" if new_value.lower() == "true" else "off" + elif new_value.split(" ")[0].replace(",", "").lower() == "yes": + new_value = "on" + elif new_value.split(" ")[0].replace(",", "").lower() == "no": + new_value = "off" + else: + raise ServiceValidationError( + "Response could not be parsed. Please check your prompt.") + elif type == "number": + try: + new_value = float(new_value) + except ValueError: + raise ServiceValidationError( + "Response could not be parsed. Please check your prompt.") + elif type == "option": + options = hass.states.get(sensor_entity).attributes["options"] + if new_value not in options: + raise ServiceValidationError( + "Response could not be parsed. 
Please check your prompt.") + # Set the value + if new_value: _LOGGER.info( f"Updating sensor {sensor_entity} with new value: {new_value}") try: - hass.states.async_set(sensor_entity, new_value) + current_attributes = hass.states.get(sensor_entity).attributes.copy() + hass.states.async_set(sensor_entity, new_value, current_attributes) except Exception as e: _LOGGER.error(f"Failed to update sensor {sensor_entity}: {e}") raise @@ -309,40 +334,33 @@ async def stream_analyzer(data_call): async def data_analyzer(data_call): """Handle the service call to analyze visual data""" - def is_number(s): - """Helper function to check if string can be parsed as number""" - try: - float(s) - return True - except ValueError: - return False - - start = dt_util.now() call = ServiceCallData(data_call).get_service_call_data() sensor_entity = data_call.data.get("sensor_entity") _LOGGER.info(f"Sensor entity: {sensor_entity}") # get current value to determine data type state = hass.states.get(sensor_entity).state + sensor_type = sensor_entity.split(".")[0] _LOGGER.info(f"Current state: {state}") if state == "unavailable": raise ServiceValidationError("Sensor entity is unavailable") - if state == "on" or state == "off": - data_type = "'on' or 'off' (lowercase)" - elif is_number(state): - data_type = "number" + if sensor_type == "input_boolean" or sensor_type == "binary_sensor" or sensor_type == "switch" or sensor_type == "boolean": + data_type = "one of: ['on', 'off']" + type = "boolean" + elif sensor_type == "input_number" or sensor_type == "number" or sensor_type == "sensor": + data_type = "a number" + type = "number" + elif sensor_type == "input_select": + options = hass.states.get(sensor_entity).attributes["options"] + data_type = "one of these options: " + \ + ", ".join([f"'{option}'" for option in options]) + type = "option" else: - if "options" in hass.states.get(sensor_entity).attributes: - data_type = "one of these options: " + \ - ", ".join([f"'{option}'" for option in 
hass.states.get( - sensor_entity).attributes["options"]]) - else: - data_type = "string" - - message = f"Your job is to extract data from images. Return a {data_type} only. No additional text or other options allowed!. If unsure, choose the option that best matches. Follow these instructions: " + call.message - _LOGGER.info(f"Message: {message}") + raise ServiceValidationError("Unsupported sensor entity type") + + call.message = f"Your job is to extract data from images. You can only respond with {data_type}. You must respond with one of the options! If unsure, choose the option that best matches. Answer the following question with the options provided: " + call.message request = Request(hass, - message=message, + message=call.message, max_tokens=call.max_tokens, temperature=call.temperature, ) @@ -355,7 +373,7 @@ def is_number(s): response = await request.call(call) _LOGGER.info(f"Response: {response}") # udpate sensor in data_call.data.get("sensor_entity") - await _update_sensor(hass, sensor_entity, response["response_text"]) + await _update_sensor(hass, sensor_entity, response["response_text"], type) return response # Register services diff --git a/custom_components/llmvision/media_handlers.py b/custom_components/llmvision/media_handlers.py index 088b16b..d2347d8 100644 --- a/custom_components/llmvision/media_handlers.py +++ b/custom_components/llmvision/media_handlers.py @@ -5,6 +5,7 @@ import logging import time import asyncio +from homeassistant.helpers.aiohttp_client import async_get_clientsession from functools import partial from PIL import Image, UnidentifiedImageError import numpy as np @@ -19,6 +20,7 @@ class MediaProcessor: def __init__(self, hass, client): self.hass = hass + self.session = async_get_clientsession(self.hass) self.client = client self.base64_images = [] self.filenames = [] diff --git a/custom_components/llmvision/providers.py b/custom_components/llmvision/providers.py index ef82190..457aa1e 100644 --- 
a/custom_components/llmvision/providers.py +++ b/custom_components/llmvision/providers.py @@ -87,13 +87,13 @@ def get_provider(hass, provider_uid): return None - def validate(self, call): + def validate(self, call) -> None | ServiceValidationError: """Validate call data""" # Check image input if not call.base64_images: raise ServiceValidationError(ERROR_NO_IMAGE_INPUT) # Check if single image is provided for Groq - if len(call.base64_images) > 1 and self.get_provider(call.provider) == 'Groq': + if len(call.base64_images) > 1 and self.get_provider(self.hass, call.provider) == 'Groq': raise ServiceValidationError(ERROR_GROQ_MULTIPLE_IMAGES) # Check provider is configured if not call.provider: @@ -105,13 +105,25 @@ def default_model(provider): return { "Azure": "gpt-4o-mini", "Custom OpenAI": "gpt-4o-mini", "Google": "gemini-1.5-flash-latest", - "Groq": "llava-v1.5-7b-4096-preview", + "Groq": "llama-3.2-11b-vision-preview ", "LocalAI": "gpt-4-vision-preview", - "Ollama": "llava-phi3:latest", + "Ollama": "minicpm-v", "OpenAI": "gpt-4o-mini" }.get(provider, "gpt-4o-mini") # Default value async def call(self, call): + """ + Forwards a request to the specified provider and optionally generates a title. + + Args: + call (object): The call object containing request details. + + Raises: + ServiceValidationError: If the provider is invalid. + + Returns: + dict: A dictionary containing the generated title (if any) and the response text. + """ entry_id = call.provider config = self.hass.data.get(DOMAIN).get(entry_id) @@ -121,6 +133,8 @@ async def call(self, call): call.base64_images = self.base64_images call.filenames = self.filenames + self.validate(call) + gen_title_prompt = "Your job is to generate a title in the form ' seen' for texts. Do not mention the time, do not speculate. 
Generate a title for this text: {response}" if provider == 'OpenAI': @@ -249,6 +263,14 @@ async def _resolve_error(self, response, provider): class Provider(ABC): + """ + Abstract base class for providers + + Args: + hass (object): Home Assistant instance + api_key (str, optional): API key for the provider, defaults to "" + endpoint (dict, optional): Endpoint configuration for the provider + """ def __init__(self, hass, api_key="", @@ -337,6 +359,7 @@ async def _resolve_error(self, response, provider) -> str: class OpenAI(Provider): def __init__(self, hass, api_key="", endpoint={'base_url': ENDPOINT_OPENAI}): super().__init__(hass, api_key, endpoint=endpoint) + self.default_model = "gpt-4o-mini" def _generate_headers(self) -> dict: return {'Content-type': 'application/json', @@ -344,9 +367,7 @@ def _generate_headers(self) -> dict: async def _make_request(self, data) -> str: headers = self._generate_headers() - response = await self._post(url=self.endpoint.get('base_url'), headers=headers, data=data) - response_text = response.get( "choices")[0].get("message").get("content") return response_text @@ -394,6 +415,7 @@ async def validate(self) -> None | ServiceValidationError: class AzureOpenAI(Provider): def __init__(self, hass, api_key="", endpoint={'base_url': "", 'deployment': "", 'api_version': ""}): super().__init__(hass, api_key, endpoint) + self.default_model = "gpt-4o-mini" def _generate_headers(self) -> dict: return {'Content-type': 'application/json', @@ -407,9 +429,7 @@ async def _make_request(self, data) -> str: api_version=self.endpoint.get("api_version") ) - response = await self._post( - url=endpoint, headers=headers, data=data) - + response = await self._post(url=endpoint, headers=headers, data=data) response_text = response.get( "choices")[0].get("message").get("content") return response_text @@ -459,6 +479,7 @@ async def validate(self) -> None | ServiceValidationError: class Anthropic(Provider): def __init__(self, hass, api_key=""): 
super().__init__(hass, api_key) + self.default_model = "claude-3-5-sonnet-latest" def _generate_headers(self) -> dict: return { @@ -468,8 +489,6 @@ def _generate_headers(self) -> dict: } async def _make_request(self, data) -> str: - api_key = self.api_key - headers = self._generate_headers() response = await self._post(url=ENDPOINT_ANTHROPIC, headers=headers, data=data) response_text = response.get("content")[0].get("text") @@ -490,7 +509,7 @@ def _prepare_vision_data(self, call) -> dict: data["messages"][0]["content"].append({"type": "image", "source": { "type": "base64", "media_type": "image/jpeg", "data": f"{image}"}}) data["messages"][0]["content"].append( - {"type": "text", "text": self.message}) + {"type": "text", "text": call.message}) return data def _prepare_text_data(self, call) -> dict: @@ -520,6 +539,7 @@ async def validate(self) -> None | ServiceValidationError: class Google(Provider): def __init__(self, hass, api_key="", endpoint={'base_url': ENDPOINT_GOOGLE, 'model': "gemini-1.5-flash-latest"}): super().__init__(hass, api_key, endpoint) + self.default_model = "gemini-1.5-flash-latest" def _generate_headers(self) -> dict: return {'content-type': 'application/json'} @@ -567,13 +587,12 @@ async def validate(self) -> None | ServiceValidationError: class Groq(Provider): def __init__(self, hass, api_key=""): super().__init__(hass, api_key) + self.default_model = "llama-3.2-11b-vision-preview" def _generate_headers(self) -> dict: return {'Content-type': 'application/json', 'Authorization': 'Bearer ' + self.api_key} async def _make_request(self, data) -> str: - api_key = self.api_key - headers = self._generate_headers() response = await self._post(url=ENDPOINT_GROQ, headers=headers, data=data) response_text = response.get( @@ -615,11 +634,11 @@ async def validate(self) -> None | ServiceValidationError: raise ServiceValidationError("empty_api_key") headers = self._generate_headers() data = { - "contents": [{ - "parts": [ - {"text": "Hello"} - ]} - ] + 
"model": "llama3-8b-8192", + "messages": [{ + "role": "user", + "content": "Hi" + }] } await self._post(url=ENDPOINT_GROQ, headers=headers, data=data) @@ -627,6 +646,7 @@ async def validate(self) -> None | ServiceValidationError: class LocalAI(Provider): def __init__(self, hass, api_key="", endpoint={'ip_address': "", 'port': "", 'https': False}): super().__init__(hass, api_key, endpoint) + self.default_model = "gpt-4-vision-preview" async def _make_request(self, data) -> str: endpoint = ENDPOINT_LOCALAI.format( @@ -682,22 +702,19 @@ async def validate(self) -> None | ServiceValidationError: class Ollama(Provider): def __init__(self, hass, api_key="", endpoint={'ip_address': "0.0.0.0", 'port': "11434", 'https': False}): super().__init__(hass, api_key, endpoint) + self.default_model = "minicpm-v" async def _make_request(self, data) -> str: https = self.endpoint.get("https") ip_address = self.endpoint.get("ip_address") port = self.endpoint.get("port") protocol = "https" if https else "http" - endpoint = ENDPOINT_OLLAMA.format( ip_address=ip_address, port=port, protocol=protocol ) - _LOGGER.info( - f"endpoint: {endpoint} https: {https} ip_address: {ip_address} port: {port}") - response = await self._post(url=endpoint, headers={}, data=data) response_text = response.get("message").get("content") return response_text @@ -732,8 +749,11 @@ async def validate(self) -> None | ServiceValidationError: protocol = "https" if self.endpoint.get("https") else "http" try: - response = await session.get(f"{protocol}://{ip_address}:{port}/api/tags") + _LOGGER.info(f"Checking connection to {protocol}://{ip_address}:{port}") + response = await session.get(f"{protocol}://{ip_address}:{port}/api/tags", headers={}) + _LOGGER.info(f"Response: {response}") if response.status != 200: raise ServiceValidationError('handshake_failed') - except Exception: + except Exception as e: + _LOGGER.error(f"Error: {e}") raise ServiceValidationError('handshake_failed') From 
fd035d0075acdf56d67b8e4a4b55af7cb031b098 Mon Sep 17 00:00:00 2001 From: valentinfrlch Date: Mon, 23 Dec 2024 15:03:20 +0100 Subject: [PATCH 8/9] Fixed validation method for AzureOpenAI --- blueprints/event_summary.yaml | 71 ++++++++++--- custom_components/llmvision/config_flow.py | 4 +- custom_components/llmvision/const.py | 2 +- custom_components/llmvision/providers.py | 110 ++++++++------------- 4 files changed, 102 insertions(+), 85 deletions(-) diff --git a/blueprints/event_summary.yaml b/blueprints/event_summary.yaml index 0521c82..9f70af5 100644 --- a/blueprints/event_summary.yaml +++ b/blueprints/event_summary.yaml @@ -2,7 +2,7 @@ blueprint: name: AI Event Summary (LLM Vision v1.3.1) author: valentinfrlch description: > - AI-powered security event summaries for frigate or camera entities. + AI-powered security event summaries for frigate or camera entities. Sends a notification with a preview to your phone that is updated dynamically when the AI summary is available. domain: automation source_url: https://github.com/valentinfrlch/ha-llmvision/blob/main/blueprints/event_summary.yaml @@ -42,23 +42,43 @@ blueprint: integration: mobile_app camera_entities: name: Camera Entities - description: (Camera and Frigate mode) List of camera entities to monitor + description: >- + (Camera and Frigate mode) + + List of camera entities to monitor default: [] selector: entity: multiple: true filter: domain: camera + object_type: + name: Included Object Type(s) + description: >- + (Frigate mode only) + + Only run if frigate labels the object as one of these. (person, dog, bird, etc) + default: [] + selector: + text: + multiline: false + multiple: true trigger_state: name: Trigger State - description: (Camera mode only) Trigger the automation when your cameras change to this state. + description: >- + (Camera mode only) + + Trigger the automation when your cameras change to this state. 
default: 'recording' selector: text: multiline: false motion_sensors: name: Motion Sensor - description: (Camera mode only) Set if your cameras don't change state. Use the same order used for camera entities. + description: >- + (Camera mode only) + + Set if your cameras don't change state. Use the same order used for camera entities. default: [] selector: entity: @@ -67,7 +87,10 @@ domain: binary_sensor preview_mode: name: Preview Mode - description: (Camera mode only) Choose between a live preview or a snapshot of the event + description: >- + (Camera mode only) + + Choose between a live preview or a snapshot of the event default: 'Live Preview' selector: select: @@ -84,14 +107,21 @@ max: 60 tap_navigate: name: Tap Navigate - description: Path to navigate to when notification is opened (e.g. /lovelace/cameras) + description: >- + Path to navigate to when notification is opened (e.g. /lovelace/cameras). + + To use the same input which was sent to the AI engine, use + `{{video if video != '''' else image}}` default: "/lovelace/0" selector: text: multiline: false duration: name: Duration - description: (Camera mode only) How long to record before analyzing (in seconds) + description: >- + (Camera mode only) + + How long to record before analyzing (in seconds) default: 5 selector: number: @@ -99,7 +129,10 @@ max: 60 max_frames: name: Max Frames - description: (Camera and Frigate mode) How many frames to analyze. Picks frames with the most movement. + description: >- + (Camera and Frigate mode) + + How many frames to analyze. Picks frames with the most movement.
default: 3 selector: number: @@ -170,11 +203,12 @@ variables: {% set ns = namespace(device_names=[]) %} {% for device_id in notify_devices %} {% set device_name = device_attr(device_id, "name") %} - {% set sanitized_name = "mobile_app_" + device_name | lower | regex_replace("[^a-z0-9 ]", "") | replace(" ", "_") %} + {% set sanitized_name = "mobile_app_" + device_name | lower | regex_replace("[' -]", "_") | regex_replace("[^a-z0-9_]", "") %} {% set ns.device_names = ns.device_names + [sanitized_name] %} {% endfor %} {{ ns.device_names }} camera_entities_list: !input camera_entities + object_types_list: !input object_type motion_sensors_list: !input motion_sensors camera_entity: > {% if mode == 'Camera' %} @@ -230,6 +264,10 @@ variables: Use "critical" only for possible burglaries and similar events. "time-sensitive" could be a courier at the front door or an event of similar importance. Reply with these replies exactly. +max_exceeded: silent + +mode: single + trigger: - platform: mqtt topic: "frigate/events" @@ -247,9 +285,10 @@ condition: - condition: template value_template: > {% if mode == 'Frigate' %} - {{ trigger.payload_json["type"] == "end" and (state_attr(this.entity_id, 'last_triggered') is none or (now() - state_attr(this.entity_id, 'last_triggered')).total_seconds() / 60 > cooldown) and ('camera.' + trigger.payload_json['after']['camera']|lower) in camera_entities_list }} - {% else %} - {{ state_attr(this.entity_id, 'last_triggered') is none or (now() - state_attr(this.entity_id, 'last_triggered')).total_seconds() / 60 > cooldown }} + {{ trigger.payload_json["type"] == "end" + and ('camera.' 
+ trigger.payload_json['after']['camera']|lower) in camera_entities_list + and ((object_types_list|length) == 0 or ((trigger.payload_json['after']['label']|lower) in object_types_list)) + }} {% endif %} @@ -293,7 +332,7 @@ action: max_tokens: 3 temperature: 0.1 response_variable: importance - + # Cancel automation if event not deemed important - choose: - conditions: @@ -365,7 +404,7 @@ action: temperature: !input temperature expose_images: "{{true if preview_mode == 'Snapshot'}}" response_variable: response - + - choose: - conditions: @@ -388,4 +427,6 @@ action: clickAction: !input tap_navigate #Android tag: "{{tag}}" group: "{{group}}" - interruption-level: passive \ No newline at end of file + interruption-level: passive + +- delay: '00:{{cooldown|int}}:00' \ No newline at end of file diff --git a/custom_components/llmvision/config_flow.py b/custom_components/llmvision/config_flow.py index 3c33d3d..4458c26 100644 --- a/custom_components/llmvision/config_flow.py +++ b/custom_components/llmvision/config_flow.py @@ -17,6 +17,7 @@ CONF_AZURE_BASE_URL, CONF_AZURE_DEPLOYMENT, CONF_AZURE_VERSION, + ENDPOINT_AZURE, CONF_ANTHROPIC_API_KEY, CONF_GOOGLE_API_KEY, CONF_GROQ_API_KEY, @@ -188,7 +189,8 @@ async def async_step_azure(self, user_input=None): user_input["provider"] = self.init_info["provider"] try: azure = AzureOpenAI(self.hass, api_key=user_input[CONF_AZURE_API_KEY], endpoint={ - 'base_url': user_input[CONF_AZURE_BASE_URL], + 'base_url': ENDPOINT_AZURE, + 'endpoint': user_input[CONF_AZURE_BASE_URL], 'deployment': user_input[CONF_AZURE_DEPLOYMENT], 'api_version': user_input[CONF_AZURE_VERSION] }) diff --git a/custom_components/llmvision/const.py b/custom_components/llmvision/const.py index f2d4c6b..321baf9 100644 --- a/custom_components/llmvision/const.py +++ b/custom_components/llmvision/const.py @@ -59,4 +59,4 @@ ENDPOINT_GROQ = "https://api.groq.com/openai/v1/chat/completions" ENDPOINT_LOCALAI = "{protocol}://{ip_address}:{port}/v1/chat/completions" 
ENDPOINT_OLLAMA = "{protocol}://{ip_address}:{port}/api/chat" -ENDPOINT_AZURE = "https://{base_url}/openai/deployments/{deployment}/chat/completions?api-version={api_version}" +ENDPOINT_AZURE = "{base_url}openai/deployments/{deployment}/chat/completions?api-version={api_version}" diff --git a/custom_components/llmvision/providers.py b/custom_components/llmvision/providers.py index 457aa1e..5fefc09 100644 --- a/custom_components/llmvision/providers.py +++ b/custom_components/llmvision/providers.py @@ -23,6 +23,7 @@ CONF_CUSTOM_OPENAI_API_KEY, VERSION_ANTHROPIC, ENDPOINT_OPENAI, + ENDPOINT_AZURE, ENDPOINT_ANTHROPIC, ENDPOINT_GOOGLE, ENDPOINT_LOCALAI, @@ -99,18 +100,6 @@ def validate(self, call) -> None | ServiceValidationError: if not call.provider: raise ServiceValidationError(ERROR_NOT_CONFIGURED) - @staticmethod - def default_model(provider): return { - "Anthropic": "claude-3-5-sonnet-latest", - "Azure": "gpt-4o-mini", - "Custom OpenAI": "gpt-4o-mini", - "Google": "gemini-1.5-flash-latest", - "Groq": "llama-3.2-11b-vision-preview ", - "LocalAI": "gpt-4-vision-preview", - "Ollama": "minicpm-v", - "OpenAI": "gpt-4o-mini" - }.get(provider, "gpt-4o-mini") # Default value - async def call(self, call): """ Forwards a request to the specified provider and optionally generates a title. 
@@ -128,8 +117,6 @@ async def call(self, call): config = self.hass.data.get(DOMAIN).get(entry_id) provider = Request.get_provider(self.hass, entry_id) - call.model = call.model if call.model and call.model != 'None' else Request.default_model( - provider) call.base64_images = self.base64_images call.filenames = self.filenames @@ -139,102 +126,87 @@ async def call(self, call): if provider == 'OpenAI': api_key = config.get(CONF_OPENAI_API_KEY) - - openai = OpenAI(hass=self.hass, api_key=api_key) - response_text = await openai.vision_request(call) - if call.generate_title: - call.message = gen_title_prompt.format(response=response_text) - gen_title = await openai.title_request(call) + provider_instance = OpenAI(hass=self.hass, api_key=api_key) elif provider == 'Azure': api_key = config.get(CONF_AZURE_API_KEY) - base_url = config.get(CONF_AZURE_BASE_URL) + endpoint = config.get(CONF_AZURE_BASE_URL) deployment = config.get(CONF_AZURE_DEPLOYMENT) version = config.get(CONF_AZURE_VERSION) - azure = AzureOpenAI(self.hass, - api_key=api_key, - endpoint={ - 'base_url': base_url, - 'deployment': deployment, - 'api_version': version - }) - response_text = await azure.vision_request(call) - if call.generate_title: - call.message = gen_title_prompt.format(response=response_text) - gen_title = await azure.title_request(call) + provider_instance = AzureOpenAI(self.hass, + api_key=api_key, + endpoint={ + 'base_url': ENDPOINT_AZURE, + 'endpoint': endpoint, + 'deployment': deployment, + 'api_version': version + }) elif provider == 'Anthropic': api_key = config.get(CONF_ANTHROPIC_API_KEY) - anthropic = Anthropic(self.hass, api_key=api_key) - response_text = await anthropic.vision_request(call) - if call.generate_title: - call.message = gen_title_prompt.format(response=response_text) - gen_title = await anthropic.title_request(call) + provider_instance = Anthropic(self.hass, api_key=api_key) elif provider == 'Google': api_key = config.get(CONF_GOOGLE_API_KEY) - google = 
Google(self.hass, api_key=api_key, endpoint={ + provider_instance = Google(self.hass, api_key=api_key, endpoint={ 'base_url': ENDPOINT_GOOGLE, 'model': call.model }) - response_text = await google.vision_request(call) - if call.generate_title: - call.message = gen_title_prompt.format(response=response_text) - gen_title = await google.title_request(call) elif provider == 'Groq': api_key = config.get(CONF_GROQ_API_KEY) - groq = Groq(self.hass, api_key=api_key) - response_text = await groq.vision_request(call) - if call.generate_title: - gen_title = await groq.title_request(call) + provider_instance = Groq(self.hass, api_key=api_key) elif provider == 'LocalAI': ip_address = config.get(CONF_LOCALAI_IP_ADDRESS) port = config.get(CONF_LOCALAI_PORT) https = config.get(CONF_LOCALAI_HTTPS, False) - localai = LocalAI(self.hass, endpoint={ + provider_instance = LocalAI(self.hass, endpoint={ 'ip_address': ip_address, 'port': port, 'https': https }) - response_text = await localai.vision_request(call) - if call.generate_title: - call.message = gen_title_prompt.format(response=response_text) - gen_title = await localai.title_request(call) + elif provider == 'Ollama': ip_address = config.get(CONF_OLLAMA_IP_ADDRESS) port = config.get(CONF_OLLAMA_PORT) https = config.get(CONF_OLLAMA_HTTPS, False) - ollama = Ollama(self.hass, endpoint={ + provider_instance = Ollama(self.hass, endpoint={ 'ip_address': ip_address, 'port': port, 'https': https }) - response_text = await ollama.vision_request(call) + response_text = await provider_instance.vision_request(call) if call.generate_title: call.message = gen_title_prompt.format(response=response_text) - gen_title = await ollama.title_request(call) + gen_title = await provider_instance.title_request(call) + elif provider == 'Custom OpenAI': api_key = config.get(CONF_CUSTOM_OPENAI_API_KEY, "") endpoint = config.get( CONF_CUSTOM_OPENAI_ENDPOINT) + "/v1/chat/completions" - - custom_openai = OpenAI( + provider_instance = OpenAI( self.hass, 
api_key=api_key, endpoint=endpoint) - response_text = await custom_openai.vision_request(api_key, endpoint) - if call.generate_title: - call.message = gen_title_prompt.format(response=response_text) - gen_title = await custom_openai.title_request(api_key=api_key, prompt=gen_title_prompt.format(response=response_text)) + else: raise ServiceValidationError("invalid_provider") - return {"title": gen_title.replace(".", "").replace("'", "") if 'gen_title' in locals() else None, "response_text": response_text} + # Make call to provider + call.model = call.model if call.model and call.model != 'None' else provider_instance.default_model + response_text = await provider_instance.vision_request(call) + + if call.generate_title: + call.message = gen_title_prompt.format(response=response_text) + gen_title = await provider_instance.title_request(call) + + return {"title": gen_title.replace(".", "").replace("'", ""), "response_text": response_text} + else: + return {"response_text": response_text} def add_frame(self, base64_image, filename): self.base64_images.append(base64_image) @@ -271,6 +243,7 @@ class Provider(ABC): api_key (str, optional): API key for the provider, defaults to "" endpoint (dict, optional): Endpoint configuration for the provider """ + def __init__(self, hass, api_key="", @@ -413,7 +386,7 @@ async def validate(self) -> None | ServiceValidationError: class AzureOpenAI(Provider): - def __init__(self, hass, api_key="", endpoint={'base_url': "", 'deployment': "", 'api_version': ""}): + def __init__(self, hass, api_key="", endpoint={'base_url': ENDPOINT_AZURE, 'endpoint': "", 'deployment': "", 'api_version': ""}): super().__init__(hass, api_key, endpoint) self.default_model = "gpt-4o-mini" @@ -423,8 +396,8 @@ def _generate_headers(self) -> dict: async def _make_request(self, data) -> str: headers = self._generate_headers() - endpoint = self.endpoint.get("endpoint").format( - base_url=self.endpoint.get("base_url"), + endpoint = 
self.endpoint.get("base_url").format( + base_url=self.endpoint.get("endpoint"), deployment=self.endpoint.get("deployment"), api_version=self.endpoint.get("api_version") ) @@ -449,7 +422,7 @@ def _prepare_vision_data(self, call) -> list: "url": f"data:image/jpeg;base64,{image}"}}) payload["messages"][0]["content"].append( {"type": "text", "text": call.message}) - return payload["messages"] + return payload def _prepare_text_data(self, call) -> list: return {"messages": [{"role": "user", "content": [{"type": "text", "text": call.message}]}], @@ -462,8 +435,8 @@ async def validate(self) -> None | ServiceValidationError: if not self.api_key: raise ServiceValidationError("empty_api_key") - endpoint = self.endpoint.get("endpoint").format( - base_url=self.endpoint.get("base_url"), + endpoint = self.endpoint.get("base_url").format( + base_url=self.endpoint.get("endpoint"), deployment=self.endpoint.get("deployment"), api_version=self.endpoint.get("api_version") ) @@ -749,7 +722,8 @@ async def validate(self) -> None | ServiceValidationError: protocol = "https" if self.endpoint.get("https") else "http" try: - _LOGGER.info(f"Checking connection to {protocol}://{ip_address}:{port}") + _LOGGER.info( + f"Checking connection to {protocol}://{ip_address}:{port}") response = await session.get(f"{protocol}://{ip_address}:{port}/api/tags", headers={}) _LOGGER.info(f"Response: {response}") if response.status != 200: From e72cfa90841a853368feb6c3bd32794059731b89 Mon Sep 17 00:00:00 2001 From: valentinfrlch Date: Wed, 25 Dec 2024 16:55:20 +0100 Subject: [PATCH 9/9] Fixed LocalAI validation --- custom_components/llmvision/__init__.py | 3 +-- custom_components/llmvision/providers.py | 4 ++-- custom_components/llmvision/strings.json | 10 ++++++++++ custom_components/llmvision/translations/de.json | 10 ++++++++++ custom_components/llmvision/translations/en.json | 10 ++++++++++ 5 files changed, 33 insertions(+), 4 deletions(-) diff --git a/custom_components/llmvision/__init__.py 
b/custom_components/llmvision/__init__.py index cf9345b..7ada025 100644 --- a/custom_components/llmvision/__init__.py +++ b/custom_components/llmvision/__init__.py @@ -152,7 +152,6 @@ async def _remember(hass, call, start, response) -> None: # Find semantic index config config_entry = None for entry in hass.config_entries.async_entries(DOMAIN): - _LOGGER.info(f"Entry: {entry.data}") # Check if the config entry is empty if entry.data["provider"] == "Event Calendar": config_entry = entry @@ -372,7 +371,7 @@ async def data_analyzer(data_call): ) response = await request.call(call) _LOGGER.info(f"Response: {response}") - # udpate sensor in data_call.data.get("sensor_entity") + # update sensor in data_call.data.get("sensor_entity") await _update_sensor(hass, sensor_entity, response["response_text"], type) return response diff --git a/custom_components/llmvision/providers.py b/custom_components/llmvision/providers.py index 5fefc09..3e939eb 100644 --- a/custom_components/llmvision/providers.py +++ b/custom_components/llmvision/providers.py @@ -292,7 +292,7 @@ async def _post(self, url, headers, data) -> dict: _LOGGER.info(f"Request data: {Request.sanitize_data(data)}") try: - _LOGGER.info(f"Posting to {url} with headers {headers}") + _LOGGER.info(f"Posting to {url}") response = await self.session.post(url, headers=headers, json=data) except Exception as e: raise ServiceValidationError(f"Request failed: {e}") @@ -628,7 +628,7 @@ async def _make_request(self, data) -> str: port=self.endpoint.get("port") ) - headers = self._generate_headers() + headers = {} response = await self._post(url=endpoint, headers=headers, data=data) response_text = response.get( "choices")[0].get("message").get("content") diff --git a/custom_components/llmvision/strings.json b/custom_components/llmvision/strings.json index 2e038d6..e3b6304 100644 --- a/custom_components/llmvision/strings.json +++ b/custom_components/llmvision/strings.json @@ -30,6 +30,16 @@ "openai_api_key": "Your API key" } }, + 
"azure": { + "title": "Configure Azure", + "description": "Provide a valid Azure API key, base URL, deployment and API version.\nThe Base URL must be in the format `https://domain.openai.azure.com/` including the trailing slash.", + "data": { + "azure_api_key": "Your API key", + "azure_base_url": "Base URL", + "azure_deployment": "Deployment", + "azure_version": "API Version" + } + }, "anthropic": { "title": "Configure Anthropic Claude", "description": "Provide a valid Anthropic API key.", diff --git a/custom_components/llmvision/translations/de.json b/custom_components/llmvision/translations/de.json index 49956eb..fa8156a 100644 --- a/custom_components/llmvision/translations/de.json +++ b/custom_components/llmvision/translations/de.json @@ -28,6 +28,16 @@ "api_key": "Dein API-key" } }, + "azure": { + "title": "Azure konfigurieren", + "description": "Gib einen gültigen Azure API-key, die Base URL, den Namen des Deployments und die API-Version an.\nDie Base URL muss dieses Format haben: `https://domain.openai.azure.com/` (einschliesslich des abschliessenden '/')", + "data": { + "azure_api_key": "Dein API key", + "azure_base_url": "Base URL", + "azure_deployment": "Deployment", + "azure_version": "API Version" + } + }, "anthropic": { "title": "Anthropic Claude konfigurieren", "description": "Gib einen gültigen Anthropic API-key ein.", diff --git a/custom_components/llmvision/translations/en.json b/custom_components/llmvision/translations/en.json index 2e038d6..e3b6304 100644 --- a/custom_components/llmvision/translations/en.json +++ b/custom_components/llmvision/translations/en.json @@ -30,6 +30,16 @@ "openai_api_key": "Your API key" } }, + "azure": { + "title": "Configure Azure", + "description": "Provide a valid Azure API key, base URL, deployment and API version.\nThe Base URL must be in the format `https://domain.openai.azure.com/` including the trailing slash.", + "data": { + "azure_api_key": "Your API key", + "azure_base_url": "Base URL", + "azure_deployment": 
"Deployment", + "azure_version": "API Version" + } + }, "anthropic": { "title": "Configure Anthropic Claude", "description": "Provide a valid Anthropic API key.",