Commit 80b2e9b
Add some options to messages in the GUI; save generated images locally
hlohaus committed May 19, 2024
1 parent c8d61a0 commit 80b2e9b
Showing 10 changed files with 109 additions and 37 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -64,4 +64,5 @@ dist.py
 x.txt
 bench.py
 to-reverse.txt
-g4f/Provider/OpenaiChat2.py
+g4f/Provider/OpenaiChat2.py
+generated_images/
7 changes: 6 additions & 1 deletion README.md
@@ -92,7 +92,12 @@ As per the survey, here is a list of improvements to come

 ```sh
 docker pull hlohaus789/g4f
-docker run -p 8080:8080 -p 1337:1337 -p 7900:7900 --shm-size="2g" -v ${PWD}/har_and_cookies:/app/har_and_cookies hlohaus789/g4f:latest
+docker run \
+  -p 8080:8080 -p 1337:1337 -p 7900:7900 \
+  --shm-size="2g" \
+  -v ${PWD}/har_and_cookies:/app/har_and_cookies \
+  -v ${PWD}/generated_images:/app/generated_images \
+  hlohaus789/g4f:latest
 ```
 
 3. **Access the Client:**
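The new `generated_images` volume is what makes this commit's locally saved images survive container restarts: the GUI writes them to `/app/generated_images` inside the container, and the mount mirrors them to the host. A quick host-side check, as a sketch (it assumes `docker run` was launched from the project root, so the mount points at `./generated_images`):

```python
# List the images the container has persisted to the host-side mount.
import os

images_dir = "generated_images"
for name in sorted(os.listdir(images_dir)):
    size = os.path.getsize(os.path.join(images_dir, name))
    print(f"{name}\t{size} bytes")
```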
9 changes: 2 additions & 7 deletions g4f/Provider/needs_auth/Gemini.py
@@ -4,7 +4,6 @@
 import json
 import random
 import re
-import base64
 
 from aiohttp import ClientSession, BaseConnector

@@ -193,14 +192,10 @@ async def create_async_generator(
                         yield content
                     if image_prompt:
                         images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
-                        resolved_images = []
                         if response_format == "b64_json":
-                            for image in images:
-                                async with client.get(image) as response:
-                                    data = base64.b64encode(await response.content.read()).decode()
-                                resolved_images.append(data)
-                            yield ImageDataResponse(resolved_images, image_prompt)
+                            yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
                         else:
+                            resolved_images = []
                             preview = []
                             for image in images:
                                 async with client.get(image, allow_redirects=False) as fetch:
3 changes: 2 additions & 1 deletion g4f/client/async_client.py
@@ -171,7 +171,8 @@ async def iter_image_response(
         if isinstance(chunk, ImageProviderResponse):
             if response_format == "b64_json":
                 async with ClientSession(
-                    connector=get_connector(connector, proxy)
+                    connector=get_connector(connector, proxy),
+                    cookies=chunk.options.get("cookies")
                 ) as session:
                     async def fetch_image(image):
                         async with session.get(image) as response:
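Together with the Gemini.py change above, this closes the loop: the provider attaches its session cookies to the `ImageResponse`, and the client forwards them when downloading the image URLs. A minimal sketch of that consumer side (`get_list()`, `options`, and the `"cookies"` key come from this diff; the base64 step mirrors the code removed from Gemini.py):

```python
# Sketch: fetch provider-hosted images with the provider's own session
# cookies and return base64 strings, as the b64_json response format does.
import base64
from aiohttp import ClientSession

async def images_to_b64(chunk) -> list[str]:
    # chunk is an ImageResponse-like object: get_list() yields image URLs,
    # options may carry the provider's session cookies.
    async with ClientSession(cookies=chunk.options.get("cookies")) as session:
        result = []
        for url in chunk.get_list():
            async with session.get(url) as response:
                result.append(base64.b64encode(await response.read()).decode())
        return result
```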
4 changes: 2 additions & 2 deletions g4f/gui/client/index.html
@@ -32,10 +32,10 @@
     <script type="module" src="https://cdn.jsdelivr.net/npm/mistral-tokenizer-js" async>
         import mistralTokenizer from "mistral-tokenizer-js"
     </script>
-    <script type="module" src="https://belladoreai.github.io/llama-tokenizer-js/llama-tokenizer.js" async>
+    <script type="module" src="https://cdn.jsdelivr.net/gh/belladoreai/llama-tokenizer-js@master/llama-tokenizer.js" async>
         import llamaTokenizer from "llama-tokenizer-js"
     </script>
-    <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
+    <script src="https://cdn.jsdelivr.net/npm/gpt-tokenizer/dist/cl100k_base.js" async></script>
     <script src="/static/js/text_to_speech/index.js" async></script>
     <!--
     <script src="/static/js/whisper-web/index.js" async></script>
42 changes: 32 additions & 10 deletions g4f/gui/client/static/css/style.css
@@ -265,6 +265,14 @@ body {
     padding-bottom: 0;
 }
 
+.message.print {
+    height: 100%;
+    position: absolute;
+    background-color: #fff;
+    z-index: 100;
+    top: 0;
+}
+
 .message.regenerate {
     opacity: 0.75;
 }
@@ -339,14 +347,14 @@
     flex-wrap: wrap;
 }
 
-.message .content,
-.message .content a:link,
-.message .content a:visited{
+.message .content_inner,
+.message .content_inner a:link,
+.message .content_inner a:visited{
     font-size: 15px;
     line-height: 1.3;
     color: var(--colour-3);
 }
-.message .content pre{
+.message .content_inner pre{
     white-space: pre-wrap;
 }

@@ -389,19 +397,19 @@

 .message .count .fa-clipboard,
 .message .count .fa-volume-high,
-.message .count .fa-rotate {
+.message .count .fa-rotate,
+.message .count .fa-print {
     z-index: 1000;
     cursor: pointer;
 }
 
-.message .count .fa-clipboard {
+.message .count .fa-clipboard,
+.message .count .fa-whatsapp {
     color: var(--colour-3);
 }
 
-.message .count .fa-clipboard.clicked {
-    color: var(--accent);
-}
-
+.message .count .fa-clipboard.clicked,
+.message .count .fa-print.clicked,
 .message .count .fa-volume-high.active {
     color: var(--accent);
 }
@@ -1121,4 +1129,18 @@
     50% {
         opacity: 0;
     }
 }
+
+@media print {
+    #systemPrompt:placeholder-shown,
+    .conversations,
+    .conversation .user-input,
+    .conversation .buttons,
+    .conversation .toolbar,
+    .conversation .slide-systemPrompt,
+    .message .count i,
+    .message .assistant,
+    .message .user {
+        display: none;
+    }
+}
24 changes: 24 additions & 0 deletions g4f/gui/client/static/js/chat.v1.js
@@ -192,6 +192,26 @@ const register_message_buttons = async () => {
             })
         }
     });
+    document.querySelectorAll(".message .fa-whatsapp").forEach(async (el) => {
+        if (!el.parentElement.href) {
+            const text = el.parentElement.parentElement.parentElement.innerText;
+            el.parentElement.href = `https://wa.me/?text=${encodeURIComponent(text)}`;
+        }
+    });
+    document.querySelectorAll(".message .fa-print").forEach(async (el) => {
+        if (!("click" in el.dataset)) {
+            el.dataset.click = "true";
+            el.addEventListener("click", async () => {
+                const message_el = el.parentElement.parentElement.parentElement;
+                el.classList.add("clicked");
+                message_box.scrollTop = 0;
+                message_el.classList.add("print");
+                setTimeout(() => el.classList.remove("clicked"), 1000);
+                setTimeout(() => message_el.classList.remove("print"), 1000);
+                window.print()
+            })
+        }
+    });
 }
 
 const delete_conversations = async () => {
@@ -253,6 +273,8 @@ const handle_ask = async () => {
                         ${count_words_and_tokens(message, get_selected_model())}
                         <i class="fa-solid fa-volume-high"></i>
                         <i class="fa-regular fa-clipboard"></i>
+                        <a><i class="fa-brands fa-whatsapp"></i></a>
+                        <i class="fa-solid fa-print"></i>
                     </div>
                 </div>
             </div>
@@ -625,6 +647,8 @@ const load_conversation = async (conversation_id, scroll=true) => {
                         ${count_words_and_tokens(item.content, next_provider?.model)}
                         <i class="fa-solid fa-volume-high"></i>
                         <i class="fa-regular fa-clipboard"></i>
+                        <a><i class="fa-brands fa-whatsapp"></i></a>
+                        <i class="fa-solid fa-print"></i>
                     </div>
                 </div>
             </div>
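The new share icon leans on WhatsApp's public "click to chat" scheme: a URL of the form `https://wa.me/?text=<url-encoded text>` opens a prefilled message. For reference, the same link the JavaScript above builds, sketched in Python (`urllib.parse.quote` stands in for `encodeURIComponent`):

```python
# Build the wa.me share link the GUI attaches to each message.
from urllib.parse import quote

def whatsapp_share_url(text: str) -> str:
    # safe="" percent-encodes everything except unreserved characters,
    # which closely matches encodeURIComponent.
    return f"https://wa.me/?text={quote(text, safe='')}"

print(whatsapp_share_url("Hello from g4f!"))
```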
46 changes: 35 additions & 11 deletions g4f/gui/server/api.py
@@ -1,18 +1,27 @@
 from __future__ import annotations
 
 import logging
 import json
-from typing import Iterator
+import os
+import os.path
+import uuid
+import asyncio
+import time
+from aiohttp import ClientSession
+from typing import Iterator, Optional
+from flask import send_from_directory
 
 from g4f import version, models
 from g4f import get_last_provider, ChatCompletion
 from g4f.errors import VersionNotFoundError
-from g4f.image import ImagePreview
+from g4f.typing import Cookies
+from g4f.image import ImagePreview, ImageResponse, is_accepted_format
+from g4f.requests.aiohttp import get_connector
 from g4f.Provider import ProviderType, __providers__, __map__
 from g4f.providers.base_provider import ProviderModelMixin, FinishReason
 from g4f.providers.conversation import BaseConversation
 
 conversations: dict[dict[str, BaseConversation]] = {}
+images_dir = "./generated_images"
 
 class Api():

@@ -110,14 +119,8 @@ def get_version():
"latest_version": version.utils.latest_version,
}

def generate_title(self):
"""
Generates and returns a title based on the request data.
Returns:
dict: A dictionary with the generated title.
"""
return {'title': ''}
def serve_images(self, name):
return send_from_directory(os.path.abspath(images_dir), name)

def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
"""
@@ -185,6 +188,27 @@ def _create_response_stream(self, kwargs: dict, conversation_id: str, provider:
                     yield self._format_json("message", get_error_message(chunk))
                 elif isinstance(chunk, ImagePreview):
                     yield self._format_json("preview", chunk.to_string())
+                elif isinstance(chunk, ImageResponse):
+                    async def copy_images(images: list[str], cookies: Optional[Cookies] = None):
+                        async with ClientSession(
+                            connector=get_connector(None, os.environ.get("G4F_PROXY")),
+                            cookies=cookies
+                        ) as session:
+                            async def copy_image(image):
+                                async with session.get(image) as response:
+                                    target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
+                                    with open(target, "wb") as f:
+                                        async for chunk in response.content.iter_any():
+                                            f.write(chunk)
+                                    with open(target, "rb") as f:
+                                        extension = is_accepted_format(f.read(12)).split("/")[-1]
+                                    extension = "jpg" if extension == "jpeg" else extension
+                                    new_target = f"{target}.{extension}"
+                                    os.rename(target, new_target)
+                                    return f"/images/{os.path.basename(new_target)}"
+                            return await asyncio.gather(*[copy_image(image) for image in images])
+                    images = asyncio.run(copy_images(chunk.get_list(), chunk.options.get("cookies")))
+                    yield self._format_json("content", str(ImageResponse(images, chunk.alt)))
                 elif not isinstance(chunk, FinishReason):
                     yield self._format_json("content", str(chunk))
             except Exception as e:
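A detail worth noting in `copy_image`: the file is written under a unique timestamp-plus-UUID name first, and the extension is appended only after the leading bytes are on disk, because the image type isn't known until `is_accepted_format` has sniffed them (judging by the `.split("/")[-1]`, it returns a MIME type such as `image/jpeg`). The two-step naming, isolated as a sketch under that assumption:

```python
# Sketch of the naming scheme above: collision-free name first, extension
# appended once the downloaded bytes have been identified.
import os
import time
import uuid

def new_target(images_dir: str = "./generated_images") -> str:
    # Unique, extension-less path for the in-progress download.
    return os.path.join(images_dir, f"{int(time.time())}_{uuid.uuid4()}")

def finalize(target: str, mime: str) -> str:
    # mime is assumed to look like "image/jpeg", per is_accepted_format.
    extension = mime.split("/")[-1]
    extension = "jpg" if extension == "jpeg" else extension
    new_path = f"{target}.{extension}"
    os.rename(target, new_path)
    return new_path
```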
8 changes: 4 additions & 4 deletions g4f/gui/server/backend.py
@@ -47,13 +47,13 @@ def __init__(self, app: Flask) -> None:
                 'function': self.handle_conversation,
                 'methods': ['POST']
             },
-            '/backend-api/v2/gen.set.summarize:title': {
-                'function': self.generate_title,
-                'methods': ['POST']
-            },
             '/backend-api/v2/error': {
                 'function': self.handle_error,
                 'methods': ['POST']
             },
+            '/images/<path:name>': {
+                'function': self.serve_images,
+                'methods': ['GET']
+            }
         }

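This replaces the title-generation stub with the backend's first GET route: `/images/<path:name>` resolves the `/images/...` paths returned by `copy_images` back to files under `generated_images/`. A quick way to exercise it, as a sketch (host, port, and file name are assumptions; the GUI listens on 8080 in the Docker setup above, and the name must be a file that actually exists):

```python
# Fetch a saved image back through the new Flask route.
import urllib.request

name = "1716123456_00000000-0000-0000-0000-000000000000.jpg"  # hypothetical
with urllib.request.urlopen(f"http://localhost:8080/images/{name}") as response:
    image_bytes = response.read()
    content_type = response.headers.get("Content-Type")
print(len(image_bytes), "bytes,", content_type)
```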
Empty file added generated_images/.gitkeep
Empty file.
