diff --git a/docs/notebooks/234-encodec-audio-compression-with-output.rst b/docs/notebooks/234-encodec-audio-compression-with-output.rst
index 7e98b009f940ba..cd05bd7302413b 100644
--- a/docs/notebooks/234-encodec-audio-compression-with-output.rst
+++ b/docs/notebooks/234-encodec-audio-compression-with-output.rst
@@ -587,7 +587,7 @@ like with the original PyTorch models.
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
diff --git a/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst
index 75656ed47aa094..7e2ec9efacc6bf 100644
--- a/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst
+++ b/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst
@@ -1388,7 +1388,7 @@ Run Infinite Zoom video generation `⇑ <#top>`__
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
diff --git a/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst
index 33a4df82bfdbfc..885e8893389a01 100644
--- a/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst
+++ b/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst
@@ -1024,7 +1024,7 @@ seed for latent state initialization and number of steps.
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
diff --git a/docs/notebooks/237-segment-anything-with-output.rst b/docs/notebooks/237-segment-anything-with-output.rst
index 2db34401ec919d..ecf2d8c0373bcb 100644
--- a/docs/notebooks/237-segment-anything-with-output.rst
+++ b/docs/notebooks/237-segment-anything-with-output.rst
@@ -937,9 +937,9 @@ point.
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
 
 Run OpenVINO model in automatic mask generation mode `⇑ <#top>`__
diff --git a/docs/notebooks/240-dolly-2-instruction-following-with-output.rst b/docs/notebooks/240-dolly-2-instruction-following-with-output.rst
index 9b450eb9902ce5..7dda0634e62c7a 100644
--- a/docs/notebooks/240-dolly-2-instruction-following-with-output.rst
+++ b/docs/notebooks/240-dolly-2-instruction-following-with-output.rst
@@ -693,7 +693,7 @@ generation parameters:
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
diff --git a/docs/notebooks/241-riffusion-text-to-music-with-output.rst b/docs/notebooks/241-riffusion-text-to-music-with-output.rst
index d8eb9cb1462095..121ec4aa61f53b 100644
--- a/docs/notebooks/241-riffusion-text-to-music-with-output.rst
+++ b/docs/notebooks/241-riffusion-text-to-music-with-output.rst
@@ -751,7 +751,7 @@ Interactive demo `⇑ <#top>`__
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
diff --git a/docs/notebooks/242-freevc-voice-conversion-with-output.rst b/docs/notebooks/242-freevc-voice-conversion-with-output.rst
index 0a372bf31c85b7..3e8ac5bdaaff0d 100644
--- a/docs/notebooks/242-freevc-voice-conversion-with-output.rst
+++ b/docs/notebooks/242-freevc-voice-conversion-with-output.rst
@@ -800,9 +800,9 @@ inference. Use rate corresponding to the value of
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
 
 .. code:: ipython3
diff --git a/docs/notebooks/244-named-entity-recognition-with-output.rst b/docs/notebooks/244-named-entity-recognition-with-output.rst
index dd6af58fd7bc13..9d49188f04a6f7 100644
--- a/docs/notebooks/244-named-entity-recognition-with-output.rst
+++ b/docs/notebooks/244-named-entity-recognition-with-output.rst
@@ -439,9 +439,9 @@ text.
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
 
 .. parsed-literal::
diff --git a/docs/notebooks/248-stable-diffusion-xl-with-output.rst b/docs/notebooks/248-stable-diffusion-xl-with-output.rst
index 594fb4f1a7b6e7..0c0451bd8aaee4 100644
--- a/docs/notebooks/248-stable-diffusion-xl-with-output.rst
+++ b/docs/notebooks/248-stable-diffusion-xl-with-output.rst
@@ -292,9 +292,9 @@ Text2image Generation Interactive Demo\ `⇑ <#top>`__
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
 
 .. code:: ipython3
@@ -445,9 +445,9 @@ Image2Image Generation Interactive Demo\ `⇑ <#top>`__
 
 
 
-.. raw:: html
+.. .. raw:: html
 
-
+..
 
 
 .. code:: ipython3
diff --git a/docs/notebooks/251-tiny-sd-image-generation-with-output.rst b/docs/notebooks/251-tiny-sd-image-generation-with-output.rst
index b2afd5f5c58864..466cb5801d5a35 100644
--- a/docs/notebooks/251-tiny-sd-image-generation-with-output.rst
+++ b/docs/notebooks/251-tiny-sd-image-generation-with-output.rst
@@ -1056,83 +1056,83 @@ found in this
 
 .. image:: 251-tiny-sd-image-generation-with-output_files/251-tiny-sd-image-generation_39_1.png
 
-Interactive Demo `⇑ <#top>`__
-###############################################################################################################################
-
-.. code:: ipython3
-
-    import gradio as gr
-
-    sample_img_url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/tower.jpg"
-
-    img = load_image(sample_img_url).save("tower.jpg")
-
-    def generate_from_text(text, negative_text, seed, num_steps, _=gr.Progress(track_tqdm=True)):
-        result = ov_pipe(text, negative_prompt=negative_text, num_inference_steps=num_steps, seed=seed)
-        return result["sample"][0]
-
-
-    def generate_from_image(img, text, negative_text, seed, num_steps, strength, _=gr.Progress(track_tqdm=True)):
-        result = ov_pipe(text, img, negative_prompt=negative_text, num_inference_steps=num_steps, seed=seed, strength=strength)
-        return result["sample"][0]
-
-
-    with gr.Blocks() as demo:
-        with gr.Tab("Text-to-Image generation"):
-            with gr.Row():
-                with gr.Column():
-                    text_input = gr.Textbox(lines=3, label="Positive prompt")
-                    negative_text_input = gr.Textbox(lines=3, label="Negative prompt")
-                    seed_input = gr.Slider(0, 10000000, value=751, label="Seed")
-                    steps_input = gr.Slider(1, 50, value=20, step=1, label="Steps")
-                out = gr.Image(label="Result", type="pil")
-            sample_text = "futuristic synthwave city, retro sunset, crystals, spires, volumetric lighting, studio Ghibli style, rendered in unreal engine with clean details"
-            sample_text2 = "RAW studio photo of tiny cute happy cat in a yellow raincoat in the woods, rain, a character portrait, soft lighting, high resolution, photo realistic, extremely detailed"
-            negative_sample_text = ""
-            negative_sample_text2 = "bad anatomy, blurry, noisy, jpeg artifacts, low quality, geometry, mutation, disgusting. ugly"
-            btn = gr.Button()
-            btn.click(generate_from_text, [text_input, negative_text_input, seed_input, steps_input], out)
-            gr.Examples([[sample_text, negative_sample_text, 42, 20], [sample_text2, negative_sample_text2, 1561, 25]], [text_input, negative_text_input, seed_input, steps_input])
-        with gr.Tab("Image-to-Image generation"):
-            with gr.Row():
-                with gr.Column():
-                    i2i_input = gr.Image(label="Image", type="pil")
-                    i2i_text_input = gr.Textbox(lines=3, label="Text")
-                    i2i_negative_text_input = gr.Textbox(lines=3, label="Negative prompt")
-                    i2i_seed_input = gr.Slider(0, 10000000, value=42, label="Seed")
-                    i2i_steps_input = gr.Slider(1, 50, value=10, step=1, label="Steps")
-                    strength_input = gr.Slider(0, 1, value=0.5, label="Strength")
-                i2i_out = gr.Image(label="Result", type="pil")
-            i2i_btn = gr.Button()
-            sample_i2i_text = "amazing watercolor painting"
-            i2i_btn.click(
-                generate_from_image,
-                [i2i_input, i2i_text_input, i2i_negative_text_input, i2i_seed_input, i2i_steps_input, strength_input],
-                i2i_out,
-            )
-            gr.Examples(
-                [["tower.jpg", sample_i2i_text, "", 6400023, 40, 0.3]],
-                [i2i_input, i2i_text_input, i2i_negative_text_input, i2i_seed_input, i2i_steps_input, strength_input],
-            )
-
-    try:
-        demo.queue().launch(debug=True)
-    except Exception:
-        demo.queue().launch(share=True, debug=True)
-    # if you are launching remotely, specify server_name and server_port
-    # demo.launch(server_name='your server name', server_port='server port in int')
-    # Read more in the docs: https://gradio.app/docs/
-
-
-.. parsed-literal::
-
-    Running on local URL:  http://127.0.0.1:7860
-
-    To create a public link, set `share=True` in `launch()`.
-
-
-
-.. raw:: html
-
-
+.. Interactive Demo `⇑ <#top>`__
+.. ###############################################################################################################################
+
+.. .. code:: ipython3
+
+..     import gradio as gr
+
+..     sample_img_url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/tower.jpg"
+
+..     img = load_image(sample_img_url).save("tower.jpg")
+
+..     def generate_from_text(text, negative_text, seed, num_steps, _=gr.Progress(track_tqdm=True)):
+..         result = ov_pipe(text, negative_prompt=negative_text, num_inference_steps=num_steps, seed=seed)
+..         return result["sample"][0]
+
+
+..     def generate_from_image(img, text, negative_text, seed, num_steps, strength, _=gr.Progress(track_tqdm=True)):
+..         result = ov_pipe(text, img, negative_prompt=negative_text, num_inference_steps=num_steps, seed=seed, strength=strength)
+..         return result["sample"][0]
+
+
+..     with gr.Blocks() as demo:
+..         with gr.Tab("Text-to-Image generation"):
+..             with gr.Row():
+..                 with gr.Column():
+..                     text_input = gr.Textbox(lines=3, label="Positive prompt")
+..                     negative_text_input = gr.Textbox(lines=3, label="Negative prompt")
+..                     seed_input = gr.Slider(0, 10000000, value=751, label="Seed")
+..                     steps_input = gr.Slider(1, 50, value=20, step=1, label="Steps")
+..                 out = gr.Image(label="Result", type="pil")
+..             sample_text = "futuristic synthwave city, retro sunset, crystals, spires, volumetric lighting, studio Ghibli style, rendered in unreal engine with clean details"
+..             sample_text2 = "RAW studio photo of tiny cute happy cat in a yellow raincoat in the woods, rain, a character portrait, soft lighting, high resolution, photo realistic, extremely detailed"
+..             negative_sample_text = ""
+..             negative_sample_text2 = "bad anatomy, blurry, noisy, jpeg artifacts, low quality, geometry, mutation, disgusting. ugly"
+..             btn = gr.Button()
+..             btn.click(generate_from_text, [text_input, negative_text_input, seed_input, steps_input], out)
+..             gr.Examples([[sample_text, negative_sample_text, 42, 20], [sample_text2, negative_sample_text2, 1561, 25]], [text_input, negative_text_input, seed_input, steps_input])
+..         with gr.Tab("Image-to-Image generation"):
+..             with gr.Row():
+..                 with gr.Column():
+..                     i2i_input = gr.Image(label="Image", type="pil")
+..                     i2i_text_input = gr.Textbox(lines=3, label="Text")
+..                     i2i_negative_text_input = gr.Textbox(lines=3, label="Negative prompt")
+..                     i2i_seed_input = gr.Slider(0, 10000000, value=42, label="Seed")
+..                     i2i_steps_input = gr.Slider(1, 50, value=10, step=1, label="Steps")
+..                     strength_input = gr.Slider(0, 1, value=0.5, label="Strength")
+..                 i2i_out = gr.Image(label="Result", type="pil")
+..             i2i_btn = gr.Button()
+..             sample_i2i_text = "amazing watercolor painting"
+..             i2i_btn.click(
+..                 generate_from_image,
+..                 [i2i_input, i2i_text_input, i2i_negative_text_input, i2i_seed_input, i2i_steps_input, strength_input],
+..                 i2i_out,
+..             )
+..             gr.Examples(
+..                 [["tower.jpg", sample_i2i_text, "", 6400023, 40, 0.3]],
+..                 [i2i_input, i2i_text_input, i2i_negative_text_input, i2i_seed_input, i2i_steps_input, strength_input],
+..             )
+
+..     try:
+..         demo.queue().launch(debug=True)
+..     except Exception:
+..         demo.queue().launch(share=True, debug=True)
+..     # if you are launching remotely, specify server_name and server_port
+..     # demo.launch(server_name='your server name', server_port='server port in int')
+..     # Read more in the docs: https://gradio.app/docs/
+
+
+.. .. parsed-literal::
+
+..     Running on local URL:  http://127.0.0.1:7860
+
+..     To create a public link, set `share=True` in `launch()`.
+
+
+
+.. .. raw:: html
+
+..